# brms 2.16.3

### Other changes

* Move `projpred` from `Imports:` to `Suggests:`. This has the important implication that users need to load or attach `projpred` themselves if they want to use it (the more common case is probably attaching, which is achieved by `library(projpred)`). (#1222)

### Bug Fixes

* Ensure that argument `overwrite` in `add_criterion` is working as intended thanks to Ruben Arslan. (#1219)
* Fix a bug in `get_refmodel.brmsfit()` (i.e., when using `projpred` for a `"brmsfit"`) causing offsets not to be recognized. (#1220)
* Several further minor bug fixes.

# brms 2.16.1

### Bug Fixes

* Fix a bug causing problems during post-processing of models fitted with older versions of brms and the `cmdstanr` backend thanks to Riccardo Fusaroli. (#1218)

# brms 2.16.0

### New Features

* Support several methods of the `posterior` package. (#1204)
* Substantially extend compatibility of `brms` models with `emmeans` thanks to Mattan S. Ben-Shachar. (#907, #1134)
* Combine missing value (`mi`) terms with `subset` addition terms. (#1063)
* Expose function `get_dpar` for use in the post-processing of custom families thanks to Martin Modrak. (#1131)
* Support the `squareplus` link function in all families and distributional parameters that also allow for the `log` link function.
* Add argument `incl_thres` to `posterior_linpred.brmsfit()` allowing to subtract the threshold-excluding linear predictor from the thresholds in case of an ordinal family. (#1137)
* Add a `"mock"` backend option to facilitate testing thanks to Martin Modrak. (#1116)
* Add option `file_refit = "always"` to always overwrite models stored via the `file` argument (see the example below). (#1151)
* Initial GPU support via OpenCL thanks to the help of Rok Češnovar. (#1166)
* Support argument `robust` in method `hypothesis`. (#1170)
* Vectorize the Stan code of custom likelihoods via argument `loop` of `custom_family`. (#1084)
* Experimentally allow category specific effects for ordinal `cumulative` models. (#1060)
* Regenerate Stan code of an existing model via argument `regenerate` of method `stancode`.
* Support `expose_functions` for models fitted with the `cmdstanr` backend thanks to Sebastian Weber. (#1176)
* Support `log_prob` and related functionality in models fitted with the `cmdstanr` backend via function `add_rstan_model`. (#1184)
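A minimal usage sketch of the `file` / `file_refit` combination (the `epilepsy` data and the formula are just the standard brms example, not taken from these notes):

```r
library(brms)
# cache the fitted model on disk via `file`; `file_refit = "always"` forces
# a refit and overwrites the stored file on every call
fit <- brm(count ~ zAge + zBase * Trt + (1 | patient),
           data = epilepsy, family = poisson(),
           file = "fit_epilepsy", file_refit = "always")
```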
### Other Changes

* Remove use of `cbind` to express multivariate models after over two years of deprecation (please use `mvbind` instead).
* Method `posterior_linpred(transform = TRUE)` is now equal to `posterior_epred(dpar = "mu")` and no longer deprecated.
* Refactor and extend internal post-processing functions for ordinal and categorical models thanks to Frank Weber. (#1159)
* Ignore `NA` values in interval censored boundaries as long as they are unused. (#1070)
* Take offsets into account when deriving default priors for overall intercept parameters. (#923)
* Soft deprecate measurement error (`me`) terms in favor of the more general and consistent missing value (`mi`) terms. (#698)

### Bug Fixes

* Fix an issue in the post-processing of non-normal ARMA models thanks to Thomas Buehrens. (#1149)
* Fix an issue with default baseline hazard knots in `cox` models thanks to Malcolm Gillies. (#1143)
* Fix a bug in non-linear models caused by accidental merging of operators in the non-linear formula thanks to Fernando Miguez. (#1142)
* Correctly trigger a refit for `file_refit = "on_change"` if factor level names have changed thanks to Martin Modrak. (#1128)
* Validate factors in `validate_newdata` even when they are simultaneously used as predictors and grouping variables thanks to Martin Modrak. (#1141)
* Fix a bug in the Stan code generation of threaded mixture models with predicted mixture probabilities thanks to Riccardo Fusaroli. (#1150)
* Remove duplicated Stan code related to the `horseshoe` prior thanks to Max Joseph. (#1167)
* Fix an issue in the post-processing of non-looped non-linear parameters thanks to Sebastian Weber.
* Fix an issue in the Stan code of threaded non-looped non-linear models thanks to Sebastian Weber. (#1175)
* Fix problems in the post-processing of multivariate meta-analytic models that could lead to incorrect handling of known standard errors.

# brms 2.15.0

### New Features

* Turn off normalization in the Stan model via argument `normalize` to increase sampling efficiency thanks to Andrew Johnson. (#1017, #1053)
* Enable `posterior_predict` for truncated continuous models even if the required CDF or quantile functions are unavailable.
* Update and export `validate_prior` to validate priors supplied by the user.
* Add support for within-chain threading with the `rstan (Stan >= 2.25)` backend (see the example below).
* Apply the R2-D2 shrinkage prior to population-level coefficients via function `R2D2` to be used in `set_prior`.
* Extend support for `arma` correlation structures in non-normal families.
* Extend the scope of variables passed via `data2` for use in the evaluation of most model terms.
* Refit models previously stored on disc only when necessary thanks to Martin Modrak. The behavior can be controlled via `file_refit`. (#1058)
* Allow for a finer tuning of informational messages printed in `brm` via the `silent` argument. (#1076)
* Allow `stanvars` to alter distributional parameters. (#1061)
* Allow `stanvars` to be used inside threaded likelihoods. (#1111)
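A minimal sketch of within-chain threading, assuming the `cmdstanr` backend (or an `rstan` built against Stan >= 2.25) is available; the model is again the illustrative `epilepsy` example:

```r
# threading(2) requests two threads per chain; the likelihood is then
# evaluated in parallel via reduce_sum
fit <- brm(count ~ zAge + zBase * Trt + (1 | patient),
           data = epilepsy, family = poisson(),
           backend = "cmdstanr", threads = threading(2))
```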
### Other Changes

* Improve numerical stability of ordinal sequential models (families `sratio` and `cratio`) thanks to Andrew Johnson. (#1087)

### Bug Fixes

* Allow fitting `multinomial` models with the `cmdstanr` backend thanks to Andrew Johnson. (#1033)
* Allow user-defined Stan functions in threaded models. (#1034)
* Allow usage of the `:` operator in autocorrelation terms.
* Fix Stan code generation when specifying coefficient-level priors on spline terms.
* Fix numerical issues occurring in edge cases during post-processing of Gaussian processes thanks to Marta Kołczyńska.
* Fix an error during post-processing of new levels in multi-membership terms thanks to Guilherme Mohor.
* Fix a bug in the Stan code of threaded `wiener` drift diffusion models thanks to the GitHub user yanivabir. (#1085)
* Fix a bug in the threaded Stan code for GPs with categorical `by` variables thanks to Reece Willoughby. (#1081)
* Fix a bug in the threaded Stan code when using QR decomposition thanks to Steve Bronder. (#1086)
* Include offsets in `emmeans` related methods thanks to Russell V. Lenth. (#1096)

# brms 2.14.4

### New Features

* Support `projpred` version 2.0 for variable selection in generalized linear and additive multilevel models thanks to Alejandro Catalina.
* Support `by` variables in multi-membership terms.
* Use Bayesian bootstrap in `loo_R2`.

### Bug Fixes

* Allow non-linear terms in threaded models.
* Allow multi-membership terms in threaded models.
* Allow `se` addition terms in threaded models.
* Allow `categorical` families in threaded models.
* Fix updating of parameters in `loo_moment_match`.
* Fix facet labels in `conditional_effects` thanks to Isaac Petersen. (#1014)

# brms 2.14.0

### New Features

* Experimentally support within-chain parallelization via `reduce_sum` using argument `threads` in `brm` thanks to Sebastian Weber. (#892)
* Add algorithm `fixed_param` to sample from fixed parameter values. (#973)
* No longer remove `NA` values in `data` if they are unused because of the `subset` addition argument. (#895)
* Combine `by` variables and within-group correlation matrices in group-level terms. (#674)
* Add argument `robust` to the `summary` method. (#976)
* Parallelize evaluation of the `posterior_predict` and `log_lik` methods via argument `cores`. (#819)
* Compute effective number of parameters in `kfold`.
* Show prior sources and vectorization in the `print` output of `brmsprior` objects. (#761)
* Store unused variables in the model's data frame via argument `unused` of function `brmsformula`.
* Support posterior mean predictions in `emmeans` via `dpar = "mean"` thanks to Russell V. Lenth. (#993)
* Improve control of which parameters should be saved via function `save_pars` and the corresponding argument in `brm` (see the example below). (#746)
* Add method `posterior_smooths` to compute predictions of individual smooth terms. (#738)
* Allow to display grouping variables in `conditional_effects` using the `effects` argument. (#1012)
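A minimal sketch of `save_pars()`; keeping all parameters is useful, for example, when moment matching or bridge sampling is planned later on:

```r
# store draws of all parameters, including latent and group-level ones
fit <- brm(count ~ zBase * Trt + (1 | patient),
           data = epilepsy, family = poisson(),
           save_pars = save_pars(all = TRUE))
```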
### Other Changes

* Improve sampling efficiency for a lot of models by using Stan's GLM primitives even in non-GLM cases. (#984)
* Improve sampling efficiency of multilevel models with within-group covariances thanks to David Westergaard. (#977)
* Deprecate argument `probs` in the `conditional_effects` method in favor of argument `prob`.

### Bug Fixes

* Fix a problem in `pp_check` inducing wrong observation orders in time series models thanks to Fiona Seaton. (#1007)
* Fix multiple problems with `loo_moment_match` that prevented it from working for some more complex models.

# brms 2.13.5

### New Features

* Support the Cox proportional hazards model for time-to-event data via family `cox`. (#230, #962)
* Support method `loo_moment_match`, which can be used to update a `loo` object when Pareto k estimates are large.

### Other Changes

* Improve the prediction behavior in post-processing methods when sampling new levels of grouping factors via `sample_new_levels = "uncertainty"`. (#956)

### Bug Fixes

* Fix minor problems with MKL on CRAN.

# brms 2.13.3

### New Features

* Fix shape parameters across multiple monotonic terms via argument `id` in function `mo` to ensure conditionally monotonic effects. (#924)
* Support package `rtdists` as an additional backend for the `wiener` distribution functions thanks to the help of Henrik Singmann. (#385)

### Bug Fixes

* Fix the generated Stan code of models with improper global priors and `constant` priors on some coefficients thanks to Frank Weber. (#919)
* Fix a bug in `conditional_effects` occurring for categorical models with matrix predictors thanks to Jamie Cranston. (#933)

### Other Changes

* Adjust behavior of the `rate` addition term so that it also affects the `shape` parameter in `negbinomial` models thanks to Edward Abraham. (#915)
* Adjust the default inverse-gamma prior on length-scale parameters of Gaussian processes to be less extreme in edge cases thanks to Topi Paananen.

# brms 2.13.0

### New Features

* Constrain ordinal thresholds to sum to zero via argument `threshold` in ordinal family functions thanks to the help of Marta Kołczyńska.
* Support `posterior_linpred` as method in `conditional_effects`.
* Use `std_normal` in the Stan code for improved efficiency.
* Add arguments `cor`, `id`, and `cov` to the functions `gr` and `mm` for easy specification of group-level correlation structures (see the example below).
* Improve the workflow for feeding brms-created models that were fitted somewhere else back into brms. (#745)
* Improve argument `int_conditions` in `conditional_effects` to work for all predictors, not just interactions.
* Support multiple imputation of data passed via `data2` in `brm_multiple`. (#886)
* Fully support the `emmeans` package thanks to the help of Russell V. Lenth. (#418)
* Control the within-block position of Stan code added via `stanvar` using the `position` argument.
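A minimal sketch of the new `cor` argument of `gr()`, again using the illustrative `epilepsy` example; the varying intercept and slope of `patient` are modeled as uncorrelated:

```r
fit <- brm(count ~ Trt + (1 + Trt | gr(patient, cor = FALSE)),
           data = epilepsy, family = poisson())
```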
### Bug Fixes

* Fix issue in Stan code of models with multiple `me` terms thanks to Chris Chatham. (#855, #856)
* Fix scaling problems in the estimation of ordinal models with multiple threshold vectors thanks to Marta Kołczyńska and Rok Češnovar.
* Allow usage of `std_normal` in `set_prior` thanks to Ben Goodrich. (#867)
* Fix Stan code of distributional models with `weibull`, `frechet`, or `inverse.gaussian` families thanks to Brian Huey and Jack Caster. (#879)
* Fix Stan code of models which are truncated and weighted at the same time thanks to Michael Thompson. (#884)
* Fix Stan code of multivariate models with custom families and data variables passed to the likelihood thanks to Raoul Wolf. (#906)

### Other Changes

* Reduce minimal scale of several default priors from 10 to 2.5. The resulting priors should remain weakly informative.
* Automatically group observations in `gp` for increased efficiency.
* Rename `parse_bf` to `brmsterms` and deprecate the former function.
* Rename `extract_draws` to `prepare_predictions` and deprecate the former function.
* Deprecate using a model-dependent `rescor` default.
* Deprecate argument `cov_ranef` in `brm` and related functions.
* Improve several internal interfaces. This should not have any user-visible changes.
* Simplify the parameterization of the horseshoe prior thanks to Aki Vehtari. (#873)
* Store fixed distributional parameters as regular draws so that they behave as if they were estimated in post-processing methods.

# brms 2.12.0

### New Features

* Fix parameters to constants via the `prior` argument. (#783)
* Specify autocorrelation terms directly in the model formula (see the example below). (#708)
* Translate integer covariates in non-linear formulas to integer arrays in Stan.
* Estimate `sigma` in combination with fixed correlation matrices via autocorrelation term `fcor`.
* Use argument `data2` in `brm` and related functions to pass data objects which cannot be passed via `data`. The usage of `data2` will be extended in future versions.
* Compute pointwise log-likelihood values via `log_lik` for non-factorizable Student-t models. (#705)
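A minimal sketch of an autocorrelation term specified directly in the formula; `y`, `x`, `year`, `region`, and `dat` are placeholders, not variables from these notes:

```r
# AR(1) residual autocorrelation, ordered by year within region
fit <- brm(y ~ x + ar(time = year, gr = region, p = 1), data = dat)
```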
### Bug Fixes

* Fix output of `posterior_predict` for `multinomial` models thanks to Ivan Ukhov.
* Fix selection of group-level terms via `re_formula` in multivariate models thanks to Maxime Dahirel. (#834)
* Enforce correct ordering of terms in `re_formula` thanks to @ferberkl. (#844)
* Fix post-processing of multivariate multilevel models when multiple IDs are used for the same grouping factor thanks to @lott999. (#835)
* Store response category names of ordinal models in the output of `posterior_predict` again thanks to Matthew Kay. (#838)
* Handle `NA` values more consistently in `posterior_table` thanks to Anna Hake. (#845)
* Fix a bug in the Stan code of models with multiple monotonic varying effects across different groups thanks to Julian Quandt.

### Other Changes

* Rename `offset` variables to `offsets` in the generated Stan code as the former will be reserved in the new stanc3 compiler.

# brms 2.11.1

### Bug Fixes

* Fix the version requirement of the `loo` package.
* Fix the effective sample size note in the `summary` output. (#824)
* Fix an edge case in the handling of covariates in special terms thanks to Andrew Milne. (#823)
* Allow restructuring objects multiple times with different brms versions thanks to Jonathan A. Nations. (#828)
* Fix validation of ordered factors in `newdata` thanks to Andrew Milne. (#830)

# brms 2.11.0

### New Features

* Support grouped ordinal threshold vectors via addition argument `resp_thres`. (#675)
* Support method `loo_subsample` for performing approximate leave-one-out cross-validation for large data.
* Allow storing more model fit criteria via `add_criterion`. (#793)

### Bug Fixes

* Fix prediction uncertainties of new group levels for `sample_new_levels = "uncertainty"` thanks to Dominic Magirr. (#779)
* Fix problems when using `pp_check` on censored models thanks to Andrew Milne. (#744)
* Fix an error in the generated Stan code of multivariate `zero_inflated_binomial` models thanks to Raoul Wolf. (#756)
* Fix predictions of spline models when using addition argument `subset` thanks to Ruben Arslan.
* Fix out-of-sample predictions of AR models when predicting more than one step ahead.
* Fix problems when using `reloo` or `kfold` with CAR models.
* Fix problems when using `fitted(..., scale = "linear")` with multinomial models thanks to Santiago Olivella. (#770)
* Fix problems in the `as.mcmc` method for thinned models thanks to @hoxo-m. (#811)
* Fix problems in parsing covariates of special effects terms thanks to Riccardo Fusaroli. (#813)

### Other Changes

* Rename `marginal_effects` to `conditional_effects` and `marginal_smooths` to `conditional_smooths`. (#735)
* Rename `stanplot` to `mcmc_plot`.
* Add method `pp_expect` as an alias of `fitted`. (#644)
* Model fit criteria computed via `add_criterion` are now stored in the `brmsfit$criteria` slot.
* Deprecate `resp_cat` in favor of `resp_thres`.
* Deprecate specifying global priors on regression coefficients in categorical and multivariate models.
* Improve names of weighting methods in `model_weights`.
* Deprecate reserved variable `intercept` in favor of `Intercept`.
* Deprecate argument `exact_match` in favor of `fixed`.
* Deprecate functions `add_loo` and `add_waic` in favor of `add_criterion`.

# brms 2.10.0

### New Features

* Improve convergence diagnostics in the `summary` output. (#712)
* Use primitive Stan GLM functions whenever possible. (#703)
* Pass real and integer data vectors to custom families via the addition arguments `vreal` and `vint`. (#707)
* Model compound symmetry correlations via `cor_cosy`. (#403)
* Predict `sigma` in combination with several autocorrelation structures. (#403)
* Use addition term `rate` to conveniently handle denominators of rate responses in log-linear models (see the example below).
* Fit BYM2 CAR models via `cor_car` thanks to the case study and help of Mitzi Morris.
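A minimal sketch of the `rate` addition term; `exposure`, `Trt`, and `dat` are placeholders. The denominator enters the model as an offset on the log scale:

```r
fit <- brm(count | rate(exposure) ~ Trt, data = dat, family = negbinomial())
```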
### Other Changes

* Substantially improve the sampling efficiency of SAR models thanks to the GitHub user aslez. (#680)
* No longer allow changing the boundaries of autocorrelation parameters.
* Set the number of trials to 1 by default in `marginal_effects` if not specified otherwise. (#718)
* Use non-standard evaluation for addition terms.
* Name temporary intercept parameters more consistently in the Stan code.

### Bug Fixes

* Fix problems in the post-processing of `me` terms with grouping factors thanks to the GitHub user tatters. (#706)
* Allow grouping variables to start with a dot thanks to Bruno Nicenboim. (#679)
* Allow the `horseshoe` prior in categorical and related models thanks to the GitHub user tatters. (#678)
* Fix extraction of prior samples for overall intercepts in `prior_samples` thanks to Jonas Kristoffer Lindelov. (#696)
* Allow underscores to be used in category names of categorical responses thanks to Emmanuel Charpentier. (#672)
* Fix Stan code of multivariate models with multi-membership terms thanks to the Stan discourse user Pia.
* Improve checks for non-standard variable names thanks to Ryan Holbrook. (#721)
* Fix problems when plotting facetted spaghetti plots via `marginal_smooths` thanks to Gavin Simpson. (#740)

# brms 2.9.0

### New Features

* Specify non-linear ordinal models. (#623)
* Allow to fix thresholds in ordinal mixture models. (#626)
* Use the `softplus` link function in various families. (#622)
* Use QR decomposition of design matrices via argument `decomp` of `brmsformula` thanks to the help of Ben Goodrich. (#640)
* Define argument `sparse` separately for each model formula.
* Allow using `bayes_R2` and `loo_R2` with ordinal models. (#639)
* Support `cor_arma` in non-normal models. (#648)

### Other Changes

* Change the parameterization of monotonic effects to improve their interpretability. (#578)
* No longer support the `cor_arr` and `cor_bsts` correlation structures after a year of deprecation.
* Refactor internal evaluation of special predictor terms.
* Improve penalty of splines thanks to Ben Goodrich and Ruben Arslan.

### Bug Fixes

* Fix a problem when applying `marginal_effects` to measurement error models thanks to Jonathan A. Nations. (#636)
* Fix computation of log-likelihood values for weighted mixture models.
* Fix computation of fitted values for truncated lognormal and weibull models.
* Fix checking of response boundaries for models with missing values thanks to Lucas Deschamps.
* Fix Stan code of multivariate models with both residual correlations and missing value terms thanks to Solomon Kurz.
* Fix problems with interactions of special terms when extracting variable names in `marginal_effects`.
* Allow compiling a model in `brm_multiple` without sampling thanks to Will Petry. (#671)

# brms 2.8.0

### New Features

* Fit multinomial models via family `multinomial` (see the example below). (#463)
* Fit Dirichlet models via family `dirichlet`. (#463)
* Fit conditional logistic models using the `categorical` and `multinomial` families together with non-linear formula syntax. (#560)
* Choose the reference category of `categorical` and related families via argument `refcat` of the corresponding family functions.
* Use different subsets of the data in different univariate parts of a multivariate model via addition argument `subset`. (#360)
* Control the centering of population-level design matrices via argument `center` of `brmsformula` and related functions.
* Add an `update` method for `brmsfit_multiple` objects. (#615)
* Split folds after `group` in the `kfold` method. (#619)
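A minimal sketch of a multinomial model with toy data; the response is a matrix of category counts and `size` holds the row totals:

```r
dat <- data.frame(y1 = c(3, 1, 4), y2 = c(2, 4, 2), y3 = c(5, 5, 4), x = c(0, 1, 1))
dat$size <- with(dat, y1 + y2 + y3)
dat$y <- with(dat, cbind(y1, y2, y3))  # matrix column holding the counts
fit <- brm(y | trials(size) ~ x, data = dat, family = multinomial())
```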
### Other changes

* Deprecate `compare_ic` and instead recommend `loo_compare` for the comparison of `loo` objects to ensure consistency between packages. (#414)
* Use the **glue** package in the Stan code generation. (#549)
* Introduce `mvbind` to eventually replace `cbind` in the formula syntax of multivariate models.
* Validate several sampling-related arguments in `brm` before compiling the Stan model. (#576)
* Show evaluated vignettes on CRAN again. (#591)
* Export function `get_y` which is used to extract response values from `brmsfit` objects.

### Bug fixes

* Fix an error when trying to change argument `re_formula` in `bayes_R2` thanks to the GitHub user emieldl. (#592)
* Fix occasional problems when running chains in parallel via the **future** package thanks to Jared Knowles. (#579)
* Ensure correct ordering of response categories in ordinal models thanks to Jonas Kristoffer Lindelov. (#580)
* Ignore argument `resp` of `marginal_effects` in univariate models thanks to Vassilis Kehayas. (#589)
* Correctly disable cell-mean coding in varying effects.
* Allow to fix parameter `ndt` in drift diffusion models.
* Fix Stan code for t-distributed varying effects thanks to Ozgur Asar.
* Fix an error in the post-processing of monotonic effects occurring for multivariate models thanks to James Rae. (#598)
* Fix lower bounds in truncated discrete models.
* Fix checks of the original data in `kfold` thanks to the GitHub user gcolitti. (#602)
* Fix an error when applying the `VarCorr` method to meta-analytic models thanks to Michael Scharkow. (#616)

# brms 2.7.0

### New features

* Fit approximate and non-isotropic Gaussian processes via `gp` (see the example below). (#540)
* Enable parallelization of model fitting in `brm_multiple` via the future package. (#364)
* Perform posterior predictions based on k-fold cross-validation via `kfold_predict`. (#468)
* Indicate observations for out-of-sample predictions in ARMA models via argument `oos` of `extract_draws`. (#539)
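A minimal sketch of an approximate Gaussian process; `y`, `x1`, `x2`, and `dat` are placeholders, and `k` sets the number of basis functions of the approximation:

```r
fit <- brm(y ~ gp(x1, x2, k = 20), data = dat)
```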
### Other changes

* Allow factor-like variables in smooth terms. (#562)
* Make plotting of `marginal_effects` more robust to the usage of non-standard variable names.
* Deactivate certain data validity checks when using custom families.
* Improve efficiency of adjacent category models.
* No longer print informational messages from the Stan parser.

### Bug fixes

* Fix an issue that could result in a substantial efficiency drop of various post-processing methods for larger models.
* Fix an issue that resulted in an error when using `fitted(..., scale = "linear")` with ordinal models thanks to Andrew Milne. (#557)
* Allow setting priors on the overall intercept in sparse models.
* Allow sampling from models with only a single observation that also contain an offset thanks to Antonio Vargas. (#545)
* Fix an error when sampling from priors in mixture models thanks to Jacki Buros Novik. (#542)
* Fix a problem when trying to sample from priors of parameter transformations.
* Allow using `marginal_smooths` with ordinal models thanks to Andrew Milne. (#570)
* Fix an error in the post-processing of `me` terms thanks to the GitHub user hlluik. (#571)
* Correctly update `warmup` samples when using `update.brmsfit`.

# brms 2.6.0

### New features

* Fit factor smooth interactions thanks to Simon Wood.
* Specify separate priors for thresholds in ordinal models. (#524)
* Pass additional arguments to `rstan::stan_model` via argument `stan_model_args` in `brm`. (#525)
* Save model objects via argument `file` in `add_ic` after adding model fit criteria. (#478)
* Compute density ratios based on MCMC samples via `density_ratio`.
* Ignore offsets in various post-processing methods via argument `offset`.
* Update addition terms in formulas via `update_adterms`.

### Other changes

* Improve internal modularization of smooth terms.
* Reduce size of internal example models.

### Bug fixes

* Correctly plot splines with factorial covariates via `marginal_smooths`.
* Allow sampling from priors in intercept only models thanks to Emmanuel Charpentier. (#529)
* Allow logical operators in non-linear formulas.

# brms 2.5.0

### New features

* Improve `marginal_effects` to better display ordinal and categorical models via argument `categorical`. (#491, #497)
* Improve method `kfold` to offer more options for specifying omitted subsets. (#510)
* Compute estimated values of non-linear parameters via argument `nlpar` in method `fitted`.
* Disable automatic cell-mean coding in model formulas without an intercept via argument `cmc` of `brmsformula` and related functions thanks to Marie Beisemann.
* Allow using the `bridge_sampler` method even if prior samples are drawn within the model. (#485)
* Specify post-processing functions of custom families directly in `custom_family`.
* Select a subset of coefficients in `fixef`, `ranef`, and `coef` via argument `pars`. (#520)
* Allow to `overwrite` already stored fit indices when using `add_ic`.

### Other changes

* Ignore argument `resp` when post-processing univariate models thanks to Ruben Arslan. (#488)
* Deprecate argument `ordinal` of `marginal_effects`. (#491)
* Deprecate argument `exact_loo` of `kfold`. (#510)
* Deprecate usage of `binomial` families without specifying `trials`.
* No longer sample from priors of population-level intercepts when using the default intercept parameterization.

### Bug fixes

* Correctly sample from LKJ correlation priors thanks to Donald Williams.
* Remove stored fit indices when calling `update` on brmsfit objects thanks to Emmanuel Charpentier. (#490)
* Fix problems when predicting a single data point using spline models thanks to Emmanuel Charpentier. (#494)
* Set `Post.Prob = 1` if `Evid.Ratio = Inf` in method `hypothesis` thanks to Andrew Milne. (#509)
* Ensure correct handling of argument `file` in `brm_multiple`.

# brms 2.4.0

### New features

* Define custom variables in all of Stan's program blocks via function `stanvar` (see the example below). (#459)
* Change the scope of non-linear parameters to be global within univariate models. (#390)
* Allow to automatically group predictor values in Gaussian processes specified via `gp`. This may lead to a considerable increase in sampling efficiency. (#300)
* Compute LOO-adjusted R-squared using method `loo_R2`.
* Compute non-linear predictors outside of a loop over observations by means of argument `loop` in `brmsformula`.
* Fit non-linear mixture models. (#456)
* Fit censored or truncated mixture models. (#469)
* Allow `horseshoe` and `lasso` priors to be set on special population-level effects.
* Allow vectors of length greater than one to be passed to `set_prior`.
* Conveniently save and load fitted model objects in `brm` via argument `file`. (#472)
* Display posterior probabilities in the output of `hypothesis`.
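A minimal sketch of `stanvar()` with placeholder data (`y`, `x`, `dat`): a scalar is passed into Stan's data block and then reused inside a prior:

```r
sv <- stanvar(0.5, name = "b_scale", block = "data")
fit <- brm(y ~ x, data = dat, stanvars = sv,
           prior = set_prior("normal(0, b_scale)", class = "b"))
```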
### Other changes

* Deprecate argument `stan_funs` in `brm` in favor of using the `stanvars` argument for the specification of custom Stan functions.
* Deprecate arguments `flist` and `...` in `nlf`.
* Deprecate argument `dpar` in `lf` and `nlf`.

### Bug fixes

* Allow custom families in mixture models thanks to Noam Ross. (#453)
* Ensure compatibility with **mice** version 3.0. (#455)
* Fix naming of correlation parameters of group-level terms with multiple subgroups thanks to Kristoffer Magnusson. (#457)
* Improve scaling of default priors in `lognormal` models. (#460)
* Fix multiple problems in the post-processing of categorical models.
* Fix validation of nested grouping factors in post-processing methods when passing new data thanks to Liam Kendall.

# brms 2.3.1

### New features

* Allow censoring and truncation in zero-inflated and hurdle models. (#430)
* Export zero-inflated and hurdle distribution functions.

### Other changes

* Improve sampling efficiency of the ordinal families `cumulative`, `sratio`, and `cratio`. (#433)
* Allow to specify a single k-fold subset in method `kfold`. (#441)

### Bug fixes

* Fix a problem in `launch_shinystan` due to which the maximum treedepth was not correctly displayed thanks to Paul Galpern. (#431)

# brms 2.3.0

### Features

* Extend `cor_car` to support intrinsic CAR models in pairwise difference formulation thanks to the case study of Mitzi Morris.
* Compute `loo` and related methods for non-factorizable normal models.

### Other changes

* Rename quantile columns in `posterior_summary`. This affects the output of `predict` and related methods if `summary = TRUE`. (#425)
* Use hashes to check if models have the same response values when performing model comparisons. (#414)
* No longer set `pointwise` dynamically in `loo` and related methods. (#416)
* No longer show information criteria in the summary output.
* Simplify internal workflow to implement native response distributions. (#421)

### Bug fixes

* Allow `cor_car` in multivariate models with residual correlations thanks to Quentin Read. (#427)
* Fix a problem in the Stan code generation of distributional `beta` models thanks to Hans van Calster. (#404)
* Fix `launch_shinystan.brmsfit` so that all parameters are now shown correctly in the diagnose tab. (#340)

# brms 2.2.0

### Features

* Specify custom response distributions with function `custom_family`. (#381)
* Model missing values and measurement error in responses using the `mi` addition term. (#27, #343)
* Allow missing values in predictors using `mi` terms on the right-hand side of model formulas (see the example below). (#27)
* Model interactions between the special predictor terms `mo`, `me`, and `mi`. (#313)
* Introduce methods `model_weights` and `loo_model_weights` providing several options to compute model weights. (#268)
* Introduce method `posterior_average` to extract posterior samples averaged across models. (#386)
* Allow hyperparameters of group-level effects to vary over the levels of a categorical covariate using argument `by` in function `gr`. (#365)
* Allow predictions of measurement-error models with new data. (#335)
* Pass user-defined variables to Stan via `stanvar`. (#219, #357)
* Allow ordinal families in mixture models. (#389)
* Model covariates in multi-membership structures that vary over the levels of the grouping factor via `mmc` terms. (#353)
* Fit shifted log-normal models via family `shifted_lognormal`. (#218)
* Specify nested non-linear formulas.
* Introduce function `make_conditions` to ease preparation of conditions for `marginal_effects`.
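A minimal sketch of `mi()` terms with placeholder variables (`y`, `x`, `dat`): missing values in the predictor `x` are imputed during model fitting:

```r
bform <- bf(y ~ mi(x)) + bf(x | mi() ~ 1) + set_rescor(FALSE)
fit <- brm(bform, data = dat)
```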
### Other changes

* Change the parameterization of `weibull` and `exgaussian` models to be consistent with other model classes. Post-processing of related models fitted with earlier versions of `brms` is no longer possible.
* Treat integer responses in `ordinal` models as directly indicating categories even if the lowest integer is not one.
* Improve output of the `hypothesis` method thanks to the ideas of Matti Vuorre. (#362)
* Always plot `by` variables as facets in `marginal_smooths`.
* Deprecate the `cor_bsts` correlation structure.

### Bug fixes

* Allow the `:` operator to combine groups in multi-membership terms thanks to Gang Chen.
* Avoid an unexpected error when calling `LOO` with argument `reloo = TRUE` thanks to Peter Konings. (#348)
* Fix problems in `predict` when applied to categorical models thanks to Lydia Andreyevna Krasilnikova and Thomas Vladeck. (#336, #345)
* Allow truncation in multivariate models with missing values thanks to Malte Lau Petersen. (#380)
* Force time points to be unique within groups in autocorrelation structures thanks to Ruben Arslan. (#363)
* Fix problems when post-processing multiple uncorrelated group-level terms of the same grouping factor thanks to Ivy Jansen. (#374)
* Fix a problem in the Stan code of multivariate `weibull` and `frechet` models thanks to the GitHub user philj1s. (#375)
* Fix a rare error when post-processing `binomial` models thanks to the GitHub user SeanH94. (#382)
* Keep attributes of variables when preparing the `model.frame` thanks to Daniel Luedecke. (#393)

# brms 2.1.0

### Features

* Fit models on multiple imputed datasets via `brm_multiple` thanks to Ruben Arslan. (#27)
* Combine multiple `brmsfit` objects via function `combine_models`.
* Compute model averaged posterior predictions with method `pp_average`. (#319)
* Add new argument `ordinal` to `marginal_effects` to generate special plots for ordinal models thanks to the idea of the GitHub user silberzwiebel. (#190)
* Use informative inverse-gamma priors for length-scale parameters of Gaussian processes. (#275)
* Compute hypotheses for all levels of a grouping factor at once using argument `scope` in method `hypothesis`. (#327)
* Vectorize user-defined `Stan` functions exported via `export_functions` using argument `vectorize`.
* Allow predicting new data in models with ARMA autocorrelation structures.

### Bug fixes

* Correctly recover noise-free coefficients through `me` terms thanks to Ruben Arslan. As a side effect, it is no longer possible to define priors on noise-free `Xme` variables directly, but only on their hyper-parameters `meanme` and `sdme`.
* Fix problems in renaming parameters of the `cor_bsts` structure thanks to Joshua Edward Morten. (#312)
* Fix some unexpected errors when predicting from ordinal models thanks to David Hervas and Florian Bader. (#306, #307, #331)
* Fix problems when estimating and predicting multivariate ordinal models thanks to David West. (#314)
* Fix various minor problems in autocorrelation structures thanks to David West. (#320)

# brms 2.0.1

### Features

* Export the helper functions `posterior_summary` and `posterior_table`, both being used to summarize posterior samples and predictions.

### Bug fixes

* Fix incorrect computation of intercepts in `acat` and `cratio` models thanks to Peter Phalen. (#302)
* Fix `pointwise` computation of `LOO` and `WAIC` in multivariate models with estimated residual correlation structure.
* Fix problems in various S3 methods sometimes requiring unused variables to be specified in `newdata`.
* Fix naming of Stan models thanks to Hao Ran Lai.

# brms 2.0.0

This is the second major release of `brms`. The main new feature is generalized multivariate models, which now support everything already possible in univariate models, but with multiple response variables. Further, the internal structure of the package has been improved considerably to be easier to maintain and extend in the future. In addition, most deprecated functionality and arguments have been removed to provide a clean new start for the package. Models fitted with `brms` 1.0 or higher should remain fully compatible with `brms` 2.0.

### Features

* Add support for generalized multivariate models, where each of the univariate models may have a different family and autocorrelation structure. Residual correlations can be estimated for multivariate `gaussian` and `student` models. All features supported in univariate models are now also available in multivariate models (see the example below). (#3)
* Specify different formulas for different categories in `categorical` models.
* Add weakly informative default priors for the parameter class `Intercept` to improve convergence of more complex distributional models.
* Optionally display the MC standard error in the `summary` output. (#280)
* Add argument `re.form` as an alias of `re_formula` to the methods `posterior_predict`, `posterior_linpred`, and `predictive_error` for consistency with other packages making use of these methods. (#283)
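A minimal sketch of a generalized multivariate model with placeholder variables (`y1`, `y2`, `x`, `g`, `dat`): two Gaussian responses are modeled jointly with an estimated residual correlation:

```r
bform <- bf(y1 ~ x + (1 | g)) + bf(y2 ~ x + (1 | g)) + set_rescor(TRUE)
fit <- brm(bform, data = dat)
```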
* Remove the deprecated addition term `disp` from the package. * Remove old versions of methods `fixef`, `ranef`, `coef`, and `VarCorr`. * No longer support models fitted with `brms` < 1.0, which used the multivariate `'trait'` syntax originally deprecated in `brms` 1.0. * Make posterior sample extraction in the `summary` method cleaner and less error-prone. * No longer fix the seed for random number generation in `brm` to avoid unexpected behavior in simulation studies. ### Bug fixes * Store `stan_funs` in `brmsfit` objects to allow using `update` on models with user-defined Stan functions thanks to Tom Wallis. (#288) * Fix problems in various post-processing methods when applied to models with the reserved variable `intercept` in group-level terms thanks to the GitHub user ASKurz. (#279) * Fix an unexpected error in `predict` and related methods when setting `sample_new_levels = "gaussian"` in models with only one group-level effect. Thanks to Timothy Mastny. (#286) # brms 1.10.2 ### Features * Allow setting priors on noise-free variables specified via function `me`. * Add arguments `Ksub`, `exact_loo` and `group` to method `kfold` for defining omitted subsets according to a grouping variable or factor. * Allow addition argument `se` in `skew_normal` models. ### Bug fixes * Ensure correct behavior of horseshoe and lasso priors in multivariate models thanks to Donald Williams. * Allow using `identity` links on all parameters of the `wiener` family thanks to Henrik Singmann. (#276) * Use reasonable dimnames in the output of `fitted` when returning linear predictors of ordinal models thanks to the GitHub user atrolle. (#274) * Fix problems in `marginal_smooths` occurring for multi-membership models thanks to Hans Tierens. # brms 1.10.0 ### Features * Rebuild monotonic effects from scratch to allow specifying interactions with other variables; see the sketch at the end of these release notes. (#239) * Introduce methods `posterior_linpred` and `posterior_interval` for consistency with other model fitting packages based on `Stan`. * Introduce function `theme_black` providing a black `ggplot2` theme. * Specify special group-level effects within the same terms as ordinary group-level effects. * Add argument `prob` to `summary`, which allows to control the width of the computed uncertainty intervals. (#259) * Add argument `newdata` to the `kfold` method. * Add several arguments to the `plot` method of `marginal_effects` to improve control over the appearances of the plots. ### Other changes * Use the same noise-free variables for all model parts in measurement error models. (#257) * Make names of local-level terms used in the `cor_bsts` structure more informative. * Store the `autocor` argument within `brmsformula` objects. * Store posterior and prior samples in separate slots in the output of method `hypothesis`. * No longer change the default theme of `ggplot2` when attaching `brms`. (#256) * Make sure signs of estimates are not dropped when rounding to zero in `summary.brmsfit`. (#263) * Refactor parts of `extract_draws` and `linear_predictor` to be more consistent with the rest of the package. ### Bug fixes * Do not silence the `Stan` parser when calling `brm` to get informative error messages about invalid priors. * Fix problems with spaces in priors passed to `set_prior`. * Handle non-`data.frame` objects correctly in `hypothesis.default`. * Fix a problem relating to the colour of points displayed in `marginal_effects`. 
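As referenced in the feature list above, here is a minimal, illustrative sketch of the rebuilt monotonic-effects syntax; the data set and variable names (`dat`, `rating`, `income_class`, `gender`) are hypothetical and not part of brms.

``` r
# Hedged sketch: a monotonic ordinal predictor interacting with a categorical
# covariate via mo(), fitted on a hypothetical data.frame `dat`.
library(brms)
fit_mo <- brm(
  rating ~ mo(income_class) * gender,
  data = dat,            # hypothetical data with an ordered factor income_class
  family = cumulative()
)
summary(fit_mo)
```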
# brms 1.9.0 ### Features * Perform model comparisons based on marginal likelihoods using the methods `bridge_sampler`, `bayes_factor`, and `post_prob`, all powered by the `bridgesampling` package. * Compute a Bayesian version of R-squared with the `bayes_R2` method. * Specify non-linear models for all distributional parameters. * Combine multiple model formulas using the `+` operator and the helper functions `lf`, `nlf`, and `set_nl`. * Combine multiple priors using the `+` operator. * Split the `nlpar` argument of `set_prior` into the three arguments `resp`, `dpar`, and `nlpar` to allow for more flexible prior specifications. ### Other changes * Refactor parts of the package to prepare for the implementation of more flexible multivariate models in future updates. * Keep all constants in the log-posterior in order for `bridge_sampler` to work correctly. * Reduce the amount of renaming done within the `stanfit` object. * Rename argument `auxpar` of `fitted.brmsfit` to `dpar`. * Use the `launch_shinystan` generic provided by the `shinystan` package. * Set `bayesplot::theme_default()` as the default `ggplot2` theme when attaching `brms`. * Include citations of the `brms` overview paper as published in the Journal of Statistical Software. ### Bug fixes * Fix problems when calling `fitted` with `hurdle_lognormal` models thanks to Meghna Krishnadas. * Fix problems when predicting `sigma` in `asym_laplace` models thanks to Anna Josefine Sorensen. # brms 1.8.0 ### Features * Fit conditional autoregressive (CAR) models via function `cor_car` thanks to the case study of Max Joseph. * Fit spatial autoregressive (SAR) models via function `cor_sar`. Currently works for families `gaussian` and `student`. * Implement skew normal models via family `skew_normal`. Thanks to Stephen Martin for suggestions on the parameterization. * Add method `reloo` to perform exact cross-validation for problematic observations and `kfold` to perform k-fold cross-validation thanks to the Stan Team. * Regularize non-zero coefficients in the `horseshoe` prior thanks to Juho Piironen and Aki Vehtari. * Add argument `new_objects` to various post-processing methods to allow for passing of data objects, which cannot be passed via `newdata`. * Improve parallel execution flexibility via the `future` package. ### Other changes * Improve efficiency and stability of ARMA models. * Throw an error when the intercept is removed in an ordinal model instead of silently adding it back again. * Deprecate argument `threshold` in `brm` and instead recommend passing `threshold` directly to the ordinal family functions. * Throw an error instead of a message when invalid priors are passed. * Change the default value of the `autocor` slot in `brmsfit` objects to an empty `cor_brms` object. * Shorten `Stan` code by combining declarations and definitions where possible. ### Bug fixes * Fix problems in `pp_check` when the variable specified in argument `x` has attributes thanks to Paul Galpern. * Fix problems when computing fitted values for truncated discrete models based on new data thanks to Nathan Doogan. * Fix unexpected errors when passing models, which did not properly initialize, to various post-processing methods. * Do not accidentally drop the second dimension of matrices in `summary.brmsfit` for models with only a single observation. # brms 1.7.0 ### Features * Fit latent Gaussian processes of one or more covariates via function `gp` specified in the model formula (#221). 
* Rework methods `fixef`, `ranef`, `coef`, and `VarCorr` to be more flexible and consistent with other post-processing methods (#200). * Generalize method `hypothesis` to be applicable on all objects coercible to a `data.frame` (#198). * Visualize predictions via spaghetti plots using argument `spaghetti` in `marginal_effects` and `marginal_smooths`. * Introduce method `add_ic` to store and reuse information criteria in fitted model objects (#220). * Allow for negative weights in multi-membership grouping structures. * Introduce an `as.array` method for `brmsfit` objects. ### Other changes * Show output of \R code in HTML vignettes thanks to Ben Goodrich (#158). * Resolve citations in PDF vignettes thanks to Thomas Kluth (#223). * Improve sampling efficiency for `exgaussian` models thanks to Alex Forrence (#222). * Also transform data points when using argument `transform` in `marginal_effects` thanks to Markus Gesmann. ### Bug fixes * Fix an unexpected error in `marginal_effects` occurring for some models with autocorrelation terms thanks to Markus Gesmann. * Fix multiple problems occurring for models with the `cor_bsts` structure thanks to Andrew Ellis. # brms 1.6.1 ### Features * Implement zero-one-inflated beta models via family `zero_one_inflated_beta`. * Allow for more link functions in zero-inflated and hurdle models. ### Other changes * Ensure full compatibility with `bayesplot` version 1.2.0. * Deprecate addition argument `disp`. ### Bug fixes * Fix problems when setting priors on coefficients of auxiliary parameters when also setting priors on the corresponding coefficients of the mean parameter. Thanks to Matti Vuorre for reporting this bug. * Allow ordered factors to be used as grouping variables thanks to the GitHub user itissid. # brms 1.6.0 ### Features * Fit finite mixture models using family function `mixture`. * Introduce method `pp_mixture` to compute posterior probabilities of mixture component memberships thanks to a discussion with Stephen Martin. * Implement different ways to sample new levels of grouping factors in `predict` and related methods through argument `sample_new_levels`. Thanks to Tom Wallis and Jonah Gabry for a detailed discussion about this feature. * Add methods `loo_predict`, `loo_linpred`, and `loo_predictive_interval` for computing LOO predictions thanks to Aki Vehtari and Jonah Gabry. * Allow using `offset` in formulas of non-linear and auxiliary parameters. * Allow sparse matrix multiplication in non-linear and distributional models. * Allow using the `identity` link for all auxiliary parameters. * Introduce argument `negative_rt` in `predict` and `posterior_predict` to distinguish responses on the upper and lower boundary in `wiener` diffusion models thanks to Guido Biele. * Introduce method `control_params` to conveniently extract control parameters of the NUTS sampler. * Introduce argument `int_conditions` in `marginal_effects` for enhanced plotting of two-way interactions thanks to a discussion with Thomas Kluth. * Improve flexibility of the `conditions` argument of `marginal_effects`. * Extend method `stanplot` to correctly handle some new `mcmc_` plots of the `bayesplot` package. ### Other changes * Improve the `update` method to only recompile models when the `Stan` code changes. * Warn about divergent transitions when calling `summary` or `print` on `brmsfit` objects. * Warn about unused variables in argument `conditions` when calling `marginal_effects`. 
* Export and document several distribution functions that were previously kept internal. ### Bug fixes * Fix problems with the inclusion of offsets occurring for more complicated formulas thanks to Christian Stock. * Fix a bug that led to invalid Stan code when sampling from priors in intercept only models thanks to Tom Wallis. * Correctly check for category specific group-level effects in non-ordinal models thanks to Wayne Folta. * Fix problems in `pp_check` when specifying argument `newdata` together with arguments `x` or `group`. * Rename the last column in the output of `hypothesis` to `"star"` in order to avoid problems with zero length column names thanks to the GitHub user puterleat. * Add a missing new line statement at the end of the `summary` output thanks to Thomas Kluth. # brms 1.5.1 ### Features * Allow `horseshoe` and `lasso` priors to be applied on population-level effects of non-linear and auxiliary parameters. * Force recompiling `Stan` models in `update.brmsfit` via argument `recompile`. ### Other changes * Avoid indexing of matrices in non-linear models to slightly improve sampling speed. ### Bug fixes * Fix a severe problem (introduced in version 1.5.0), when predicting `Beta` models thanks to Vivian Lam. * Fix problems when summarizing some models fitted with older version of `brms` thanks to Vivian Lam. * Fix checks of argument `group` in method `pp_check` thanks to Thomas K. * Get arguments `subset` and `nsamples` working correctly in `marginal_smooths`. # brms 1.5.0 ### Features * Implement the generalized extreme value distribution via family `gen_extreme_value`. * Improve flexibility of the `horseshoe` prior thanks to Juho Piironen. * Introduce auxiliary parameter `mu` as an alternative to specifying effects within the `formula` argument in function `brmsformula`. * Return fitted values of auxiliary parameters via argument `auxpar` of method `fitted`. * Add vignette `"brms_multilevel"`, in which the advanced formula syntax of `brms` is explained in detail using several examples. ### Other changes * Refactor various parts of the package to ease implementation of mixture and multivariate models in future updates. This should not have any user visible effects. * Save the version number of `rstan` in element `version` of `brmsfit` objects. ### Bug fixes * Fix a rare error when predicting `von_mises` models thanks to John Kirwan. # brms 1.4.0 ### Features * Fit quantile regression models via family `asym_laplace` (asymmetric Laplace distribution). * Specify non-linear models in a (hopefully) more intuitive way using `brmsformula`. * Fix auxiliary parameters to certain values through `brmsformula`. * Allow `family` to be specified in `brmsformula`. * Introduce family `frechet` for modelling strictly positive responses. * Allow truncation and censoring at the same time. * Introduce function `prior_` allowing to specify priors using one-sided formulas or `quote`. * Pass priors to `Stan` directly without performing any checks by setting `check = FALSE` in `set_prior`. * Introduce method `nsamples` to extract the number of posterior samples. * Export the main formula parsing function `parse_bf`. * Add more options to customize two-dimensional surface plots created by `marginal_effects` or `marginal_smooths`. ### Other changes * Change structure of `brmsformula` objects to be more reliable and easier to extend. * Make sure that parameter `nu` never falls below `1` to reduce convergence problems when using family `student`. * Deprecate argument `nonlinear`. 
* Deprecate family `geometric`. * Rename `cov_fixed` to `cor_fixed`. * Make handling of addition terms more transparent by exporting and documenting related functions. * Refactor helper functions of the `fitted` method to be easier to extend in the future. * Remove many unit tests of internal functions and add tests of user-facing functions instead. * Import some generics from `nlme` instead of `lme4` to remove dependency on the latter one. * Do not apply `structure` to `NULL` anymore to get rid of warnings in R-devel. ### Bug fixes * Fix problems when fitting smoothing terms with factors as `by` variables thanks to Milani Chaloupka. * Fix a bug that could cause some monotonic effects to be ignored in the `Stan` code thanks to the GitHub user bschneider. * Make sure that the data of models with only a single observation are compatible with the generated `Stan` code. * Handle argument `algorithm` correctly in `update.brmsfit`. * Fix a bug sometimes causing an error in `marginal_effects` when using family `wiener` thanks to Andrew Ellis. * Fix problems in `fitted` when applied to `zero_inflated_beta` models thanks to Milani Chaloupka. * Fix minor problems related to the prediction of autocorrelated models. * Fix a few minor bugs related to the backwards compatibility of multivariate and related models fitted with `brms` < 1.0.0. # brms 1.3.1 ### Features * Introduce the auxiliary parameter `disc` ('discrimination') to be used in ordinal models. By default it is not estimated but fixed to one. * Create `marginal_effects` plots of two-way interactions of variables that were not explicitly modeled as interacting. ### Other changes * Move `rstan` to 'Imports' and `Rcpp` to 'Depends' in order to avoid loading `rstan` into the global environment automatically. ### Bug fixes * Fix a bug leading to unexpected errors in some S3 methods when applied to ordinal models. # brms 1.3.0 ### Features * Fit error-in-variables models using function `me` in the model formulae; see the sketch at the end of these release notes. * Fit multi-membership models using function `mm` in grouping terms. * Add families `exgaussian` (exponentially modified Gaussian distribution) and `wiener` (Wiener diffusion model distribution) specifically suited to handle response times. * Add the `lasso` prior as an alternative to the `horseshoe` prior for sparse models. * Add the methods `log_posterior`, `nuts_params`, `rhat`, and `neff_ratio` for `brmsfit` objects to conveniently access quantities used to diagnose sampling behavior. * Combine chains in method `as.mcmc` using argument `combine_chains`. * Estimate the auxiliary parameter `sigma` in models with known standard errors of the response by setting argument `sigma` to `TRUE` in addition function `se`. * Allow visualizing two-dimensional smooths with the `marginal_smooths` method. ### Other changes * Require argument `data` to be explicitly specified in all user-facing functions. * Refactor the `stanplot` method to use `bayesplot` on the backend. * Use the `bayesplot` theme as the default in all plotting functions. * Add the abbreviations `mo` and `cs` to specify monotonic and category specific effects, respectively. * Rename generated variables in the data.frames returned by `marginal_effects` to avoid potential naming conflicts. * Deprecate argument `cluster` and use the native `cores` argument of `rstan` instead. * Remove argument `cluster_type` as it is no longer required to apply forking. * Remove the deprecated `partial` argument. 
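As referenced in the feature list above, a minimal sketch combining the new `me` and `mm` terms; all names (`dat`, `y`, `x`, `sdx`, `school1`, `school2`) are hypothetical placeholders.

``` r
# Hedged sketch: a noise-free predictor via me() and a multi-membership
# grouping term via mm(), combined in one formula (hypothetical data `dat`).
library(brms)
fit_memm <- brm(
  y ~ me(x, sdx) + (1 | mm(school1, school2)),
  data = dat,          # must contain y, x, its known SD sdx, school1, school2
  family = gaussian()
)
```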
# brms 1.2.0 ### Features * Add the new family `hurdle_lognormal` specifically suited for zero-inflated continuous responses. * Introduce the `pp_check` method to perform various posterior predictive checks using the `bayesplot` package. * Introduce the `marginal_smooths` method to better visualize smooth terms. * Allow varying the scale of the global shrinkage parameter of the `horseshoe` prior. * Add functions `prior` and `prior_string` as aliases of `set_prior`, the former allowing to pass arguments without quotes `""` using non-standard evaluation. * Introduce four new vignettes explaining how to fit non-linear models, distributional models, phylogenetic models, and monotonic effects, respectively. * Extend the `coef` method to better handle category specific group-level effects. * Introduce the `prior_summary` method for `brmsfit` objects to obtain a summary of prior distributions applied. * Sample from the prior of the original population-level intercept when `sample_prior = TRUE` even in models with an internal temporary intercept used to improve sampling efficiency. * Introduce methods `posterior_predict`, `predictive_error` and `log_lik` as (partial) aliases of `predict`, `residuals`, and `logLik`, respectively. ### Other changes * Improve computation of Bayes factors in the `hypothesis` method to be less influenced by MCMC error. * Improve documentation of default priors. * Refactor internal structure of some formula and prior evaluating functions. This should not have any user-visible effects. * Use the `bayesplot` package as the new backend of `plot.brmsfit`. ### Bug fixes * Better mimic `mgcv` when parsing smooth terms to make sure all arguments are correctly handled. * Avoid an error occurring during the prediction of new data when grouping factors with only a single factor level were supplied thanks to Tom Wallis. * Fix `marginal_effects` to consistently produce plots for all covariates in non-linear models thanks to David Auty. * Improve the `update` method to better recognize situations where recompilation of the `Stan` code is necessary thanks to Raphael P.H. * Allow to correctly `update` the `sample_prior` argument to value `"only"`. * Fix an unexpected error occurring in many S3 methods when the thinning rate is not a divisor of the total number of posterior samples thanks to Paul Zerr. # brms 1.1.0 ### Features * Estimate monotonic group-level effects. * Estimate category specific group-level effects. * Allow `t2` smooth terms based on multiple covariates. * Estimate interval censored data via the addition argument `cens` in the model formula. * Allow to compute `residuals` also based on predicted values instead of fitted values. ### Other changes * Use the prefix `bcs` in parameter names of category specific effects and the prefix `bm` in parameter names of monotonic effects (instead of the prefix `b`) to simplify their identification. * Ensure full compatibility with `ggplot2` version 2.2. ### Bug fixes * Fix a bug that could result in incorrect threshold estimates for `cumulative` and `sratio` models thanks to Peter Congdon. * Fix a bug that sometimes kept distributional `gamma` models from being compiled thanks to Tim Beechey. * Fix a bug causing an error in `predict` and related methods when two-level factors or logical variables were used as covariates in non-linear models thanks to Martin Schmettow. * Fix a bug causing an error when passing lists to additional arguments of smoothing functions thanks to Wayne Folta. 
* Fix a bug causing an error in the `prior_samples` method for models with multiple group-level terms that refer to the same grouping factor thanks to Marco Tullio Liuzza. * Fix a bug sometimes causing an error when calling `marginal_effects` for weighted models. # brms 1.0.1 ### Minor changes * Center design matrices inside the Stan code instead of inside `make_standata`. * Get rid of several warning messages occurring on CRAN. # brms 1.0.0 This is one of the largest updates of `brms` since its initial release. In addition to many new features, the multivariate `'trait'` syntax has been removed from the package as it was confusing for users, required much special case coding, and was hard to maintain. See `help(brmsformula)` for details of the formula syntax applied in `brms`. ### Features * Allow estimating correlations between group-level effects defined across multiple formulae (e.g., in non-linear models) by specifying IDs in each grouping term via an extended `lme4` syntax. * Implement distributional regression models allowing to fully predict auxiliary parameters of the response distribution. Among many other possibilities, this can be used to model heterogeneity of variances. * Zero-inflated and hurdle models do not use multivariate syntax anymore but instead have special auxiliary parameters named `zi` and `hu` defining zero-inflation / hurdle probabilities. * Implement the `von_mises` family to model circular responses. * Introduce the `brmsfamily` function for convenient specification of `family` objects. * Allow predictions of `t2` smoothing terms for new data. * Allow vectors as arguments for the addition argument `trunc` in order to model varying truncation points. ### Other changes * Remove the `cauchy` family after several months of deprecation. * Make sure that group-level parameter names are unambiguous by adding double underscores thanks to the idea of the GitHub user schmettow. * The `predict` method now returns predicted probabilities instead of absolute frequencies of samples for ordinal and categorical models. * Compute the linear predictor in the model block of the Stan program instead of in the transformed parameters block. This avoids saving samples of unnecessary parameters to disk. Thanks go to Rick Arrano for pointing me to this issue. * Colour points in `marginal_effects` plots if sensible. * Set the default of the `robust` argument to `TRUE` in `marginal_effects.brmsfit`. ### Bug fixes * Fix a bug that could occur when predicting factorial response variables for new data. Only affects categorical and ordinal models. * Fix a bug that could lead to duplicated variable names in the Stan code when sampling from priors in non-linear models thanks to Tom Wallis. * Fix problems when trying to pointwise evaluate non-linear formulae in `logLik.brmsfit` thanks to Tom Wallis. * Ensure full compatibility of the `ranef` and `coef` methods with non-linear models. * Fix problems that occasionally occurred when handling `dplyr` datasets thanks to the GitHub user Atan1988. # brms 0.10.0 ### Features * Add support for generalized additive mixed models (GAMMs). Smoothing terms can be specified using the `s` and `t2` functions in the model formula. * Introduce `as.data.frame` and `as.matrix` methods for `brmsfit` objects. ### Other changes * The `gaussian("log")` family no longer implies a log-normal distribution, but a normal distribution with log-link to match the behavior of `glm`. The log-normal distribution can now be specified via family `lognormal`. 
* Update syntax of `Stan` models to match the recommended syntax of `Stan` 2.10. ### Bug fixes * The `ngrps` method should now always return the correct result for non-linear models. * Fix problems in `marginal_effects` for models using the reserved variable `intercept` thanks to Frederik Aust. * Fix a bug in the `print` method of `brmshypothesis` objects that could lead to duplicated and thus invalid row names. * Residual standard deviation parameters of multivariate models are again correctly displayed in the output of the `summary` method. * Fix problems when using variational Bayes algorithms with `brms` while having `rstan` >= 2.10.0 installed thanks to the GitHub user cwerner87. # brms 0.9.1 ### Features * Allow the '/' symbol in group-level terms in the `formula` argument to indicate nested grouping structures. * Allow to compute `WAIC` and `LOO` based on the pointwise log-likelihood using argument `pointwise` to substantially reduce memory requirements. ### Other changes * Add horizontal lines to the errorbars in `marginal_effects` plots for factors. ### Bug fixes * Fix a bug that could lead to a cryptic error message when changing some parts of the model `formula` using the `update` method. * Fix a bug that could lead to an error when calling `marginal_effects` for predictors that were generated with the `base::scale` function thanks to Tom Wallis. * Allow interactions of numeric and categorical predictors in `marginal_effects` to be passed to the `effects` argument in any order. * Fix a bug that could lead to incorrect results of `predict` and related methods when called with `newdata` in models using the `poly` function thanks to Brock Ferguson. * Make sure that user-specified factor contrasts are always applied in multivariate models. # brms 0.9.0 ### Features * Add support for `monotonic` effects allowing to use ordinal predictors without assuming their categories to be equidistant. * Apply multivariate formula syntax in categorical models to considerably increase modeling flexibility. * Add the addition argument `disp` to define multiplicative factors on dispersion parameters. For linear models, `disp` applies to the residual standard deviation `sigma` so that it can be used to weight observations. * Treat the fixed effects design matrix as sparse by using the `sparse` argument of `brm`. This can considerably reduce working memory requirements if the predictors contain many zeros. * Add the `cor_fixed` correlation structure to allow for fixed user-defined covariance matrices of the response variable. * Allow to pass self-defined `Stan` functions via argument `stan_funs` of `brm`. * Add the `expose_functions` method allowing to expose self-defined `Stan` functions in `R`. * Extend the functionality of the `update` method to allow all model parts to be updated. * Center the fixed effects design matrix also in multivariate models. This may lead to increased sampling speed in models with many predictors. ### Other changes * Refactor `Stan` code and data generating functions to be more consistent and easier to extend. * Improve checks of user-defined prior specifications. * Warn about models that have not converged. * Make sure that regression curves computed by the `marginal_effects` method are always smooth. * Allow to define category specific effects in ordinal models directly within the `formula` argument. ### Bug fixes * Fix problems in the generated `Stan` code when using very long non-linear model formulas thanks to Emmanuel Charpentier. 
* Fix a bug that prohibited to change priors on single standard deviation parameters in non-linear models thanks to Emmanuel Charpentier. * Fix a bug that prohibited to use nested grouping factors in non-linear models thanks to Tom Wallis. * Fix a bug in the linear predictor computation within `R`, occurring for ordinal models with multiple category specific effects. This could lead to incorrect outputs of `predict`, `fitted`, and `logLik` for these models. * Make sure that the global `"contrasts"` option is not used when post-processing a model. # brms 0.8.0 ### Features * Implement generalized non-linear models, which can be specified with the help of the `nonlinear` argument in `brm`. * Compute and plot marginal effects using the `marginal_effects` method thanks to the help of Ruben Arslan. * Implement zero-inflated beta models through family `zero_inflated_beta` thanks to the idea of Ali Roshan Ghias. * Allow to restrict domain of fixed effects and autocorrelation parameters using new arguments `lb` and `ub` in function `set_prior` thanks to the idea of Joel Gombin. * Add an `as.mcmc` method for compatibility with the `coda` package. * Allow to call the `WAIC`, `LOO`, and `logLik` methods with new data. ### Other changes * Make sure that `brms` is fully compatible with `loo` version 0.1.5. * Optionally define the intercept as an ordinary fixed effect to avoid the reparametrization via centering of the fixed effects design matrix. * Do not compute the WAIC in `summary` by default anymore to reduce computation time of the method for larger models. * The `cauchy` family is now deprecated and will be removed soon as it often has convergence issues and not much practical application anyway. * Change the default settings of the number of chains and warmup samples to the defaults of `rstan` (i.e., `chains = 4` and `warmup = iter / 2`). * Do not remove bad behaving chains anymore as they may point to general convergence problems that are dangerous to ignore. * Improve flexibility of the `theme` argument in all plotting functions. * Only show the legend once per page, when computing trace and density plots with the `plot` method. * Move code of self-defined `Stan` functions to `inst/chunks` and incorporate them into the models using `rstan::stanc_builder`. Also, add unit tests for these functions. ### Bug fixes * Fix problems when predicting with `newdata` for zero-inflated and hurdle models thanks to Ruben Arslan. * Fix problems when predicting with `newdata` if it is a subset of the data stored in a `brmsfit` object thanks to Ruben Arslan. * Fix data preparation for multivariate models if some responses are `NA` thanks to Raphael Royaute. * Fix a bug in the `predict` method occurring for some multivariate models so that it now always returns the predictions of all response variables, not just the first one. * Fix a bug in the log-likelihood computation of `hurdle_poisson` and `hurdle_negbinomial` models. This may lead to minor changes in the values obtained by `WAIC` and `LOO` for these models. * Fix some backwards compatibility issues of models fitted with version <= 0.5.0 thanks to Ulf Koether. # brms 0.7.0 ### Features * Use variational inference algorithms as alternative to the NUTS sampler by specifying argument `algorithm` in the `brm` function. * Implement beta regression models through family `Beta`. * Implement zero-inflated binomial models through family `zero_inflated_binomial`. * Implement multiplicative effects for family `bernoulli` to fit (among others) 2PL IRT models. 
* Generalize the `formula` argument for zero-inflated and hurdle models so that predictors can be included in only one of the two model parts thanks to the idea of Wade Blanchard. * Combine fixed and random effects estimates using the new `coef` method. * Call the `residuals` method with `newdata` thanks to the idea of Friederike Holz-Ebeling. * Allow new levels of random effects grouping factors in the `predict`, `fitted`, and `residuals` methods using argument `allow_new_levels`. * Selectively exclude random effects in the `predict`, `fitted`, and `residuals` methods using argument `re_formula`. * Add a `plot` method for objects returned by method `hypothesis` to visualize prior and posterior distributions of the hypotheses being tested. ### Other changes * Improve evaluation of the response part of the `formula` argument to reliably allow terms with more than one variable (e.g., `y/x ~ 1`). * Improve sampling efficiency of models containing many fixed effects through centering the fixed effects design matrix thanks to Wayne Folta. * Improve sampling efficiency of models containing uncorrelated random effects specified by means of `(random || group)` terms in `formula` thanks to Ali Roshan Ghias. * Utilize user-defined functions in the `Stan` code of ordinal models to improve readability as well as sampling efficiency. * Make sure that model comparisons using `LOO` or `WAIC` are only performed when models are based on the same responses. * Use some generic functions of the `lme4` package to avoid unnecessary function masking. This leads to a change in the argument order of method `VarCorr`. * Change the `ggplot` theme in the `plot` method through argument `theme`. * Remove the `n.` prefix in arguments `n.iter`, `n.warmup`, `n.thin`, `n.chains`, and `n.cluster` of the `brm` function. The old argument names remain usable as deprecated aliases. * Amend names of random effects parameters to simplify matching with their respective grouping factor levels. ### Bug fixes * Fix a bug in the `hypothesis` method that could cause valid model parameters to be falsely reported as invalid. * Fix a bug in the `prior_samples` method that could cause prior samples of parameters of the same class to be artificially correlated. * Fix `Stan` code of linear models with moving-average effects and non-identity link functions so that they no longer contain code related solely to autoregressive effects. * Fix a bug in the evaluation of `formula` that could cause complicated random effects terms to be falsely treated as fixed effects. * Fix several bugs when calling the `fitted` and `predict` methods with `newdata` thanks to Ali Roshan Ghias. # brms 0.6.0 ### Features * Add support for zero-inflated and hurdle models thanks to the idea of Scott Baldwin. * Implement inverse gaussian models through family `inverse.gaussian`. * Allow to specify truncation boundaries of the response variable thanks to the idea of Maciej Beresewicz. * Add support for autoregressive (AR) effects of residuals, which can be modeled using the `cor_ar` and `cor_arma` functions. * Stationary autoregressive-moving-average (ARMA) effects of order one can now also be fitted using special covariance matrices. * Implement multivariate student-t models. * Binomial and ordinal families now support the `cauchit` link function. * Allow family functions to be used in the `family` argument. * Easy access to various `rstan` plotting functions using the `stanplot` method. 
* Implement horseshoe priors to model sparsity in fixed effects coefficients thanks to the idea of Josh Chang. * Automatically scale default standard deviation priors so that they remain only weakly informative independently of the response scale. * Report model weights computed by the `loo` package when comparing multiple fitted models. ### Other changes * Separate the fixed effects Intercept from other fixed effects in the `Stan` code to slightly improve sampling efficiency. * Move autoregressive (AR) effects of the response from the `cor_ar` to the `cor_arr` function as the result of implementing AR effects of residuals. * Improve checks on argument `newdata` used in the `fitted` and `predict` methods. * Method `standata` is now the only way to extract data that was passed to `Stan` from a `brmsfit` object. * Slightly improve `Stan` code for models containing no random effects. * Change the default prior of the degrees of freedom of the `student` family to `gamma(2,0.1)`. * Improve readability of the output of method `VarCorr`. * Export the `make_stancode` function to give users direct access to `Stan` code generated by `brms`. * Rename the `brmdata` function to `make_standata`. The former remains usable as a deprecated alias. * Improve documentation to better explain differences in autoregressive effects across R packages. ### Bug fixes * Fix a bug that could cause an unexpected error when the `predict` method was called with `newdata`. * Avoid side effects of the `rstan` compilation routines that could occasionally cause R to crash. * Make `brms` work correctly with `loo` version 0.1.3 thanks to Mauricio Garnier Villarreal and Jonah Gabry. * Fix a bug that could cause WAIC and LOO estimates to be slightly incorrect for `gaussian` models with `log` link. # brms 0.5.0 ### Features * Compute the Watanabe-Akaike information criterion (WAIC) and leave-one-out cross-validation (LOO) using the `loo` package. * Provide an interface to `shinystan` with S3 method `launch_shiny`. * New functions `get_prior` and `set_prior` to make prior specifications easier; see the sketch at the end of these release notes. * Log-likelihood values and posterior predictive samples can now be calculated within R after the model has been fitted. * Make predictions based on new data using S3 method `predict`. * Allow for customized covariance structures of grouping factors with multiple random effects. * New S3 methods `fitted` and `residuals` to compute fitted values and residuals, respectively. ### Other changes * Arguments `WAIC` and `predict` are removed from the `brm` function, as they are no longer necessary. * New argument `cluster_type` in function `brm` allowing to choose the cluster type created by the parallel package. * Remove chains that fail to initialize while sampling in parallel, leaving the other chains untouched. * Redesign trace and density plots to be faster and more stable. * S3 method `VarCorr` now always returns covariance matrices regardless of whether correlations were estimated. ### Bug fixes * Fix a bug in S3 method `hypothesis` related to the calculation of Bayes-factors for point hypotheses. * User-defined covariance matrices that are not strictly positive definite for numerical reasons should now be handled correctly. * Fix problems when a factor is used as fixed effect and as random effects grouping variable at the same time thanks to Ulf Koether. * Fix minor issues with internal parameter naming. * Perform additional checking on user-defined priors. 
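As referenced in the feature list above, a small, hedged sketch of the `get_prior`/`set_prior` workflow; it uses the `epilepsy` data shipped with brms, and the chosen prior is purely illustrative.

``` r
# Hedged sketch: inspect which parameters can receive priors, then set one.
library(brms)
get_prior(count ~ zBase + (1 | patient), data = epilepsy, family = poisson())
bprior <- set_prior("normal(0, 1)", class = "b")  # illustrative prior choice
fit <- brm(count ~ zBase + (1 | patient), data = epilepsy,
           family = poisson(), prior = bprior)
```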
# brms 0.4.1 ### Features * Allow for sampling from all specified proper priors in the model. * Compute Bayes-factors for point hypotheses in S3 method `hypothesis`. ### Bug fixes * Fix a bug that could cause an error for models with multiple grouping factors thanks to Jonathan Williams. * Fix a bug that could cause an error for weighted poisson and exponential models. # brms 0.4.0 ### Features * Implement the Watanabe-Akaike Information Criterion (WAIC). * Implement the `||`-syntax for random effects allowing for the estimation of random effects standard deviations without the estimation of correlations. * Allow to combine multiple grouping factors within one random effects argument using the interaction symbol `:`. * Generalize S3 method `hypothesis` to be used with all parameter classes not just fixed effects. In addition, one-sided hypothesis testing is now possible. * Introduce new family `multigaussian` allowing for multivariate normal regression. * Introduce new family `bernoulli` for dichotomous response variables as a more efficient alternative to families `binomial` or `categorical` in this special case. ### Other changes * Slightly change the internal structure of brms to reflect that `rstan` is finally on CRAN. * Thoroughly check validity of the response variable before the data is passed to `Stan`. * Prohibit variable names containing double underscores `__` to avoid naming conflicts. * Allow function calls with several arguments (e.g. `poly(x,3)`) in the formula argument of function `brm`. * Always center random effects estimates returned by S3 method `ranef` around zero. * Prevent the use of customized covariance matrices for grouping factors with multiple random effects for now. * Remove any experimental `JAGS` code from the package. ### Bug fixes * Fix a bug in S3 method `hypothesis` leading to an error when numbers with decimal places were used in the formulation of the hypotheses. * Fix a bug in S3 method `ranef` that caused an error for grouping factors with only one random effect. * Fix a bug that could cause the fixed intercept to be wrongly estimated in the presence of multiple random intercepts thanks to Jarrod Hadfield. # brms 0.3.0 ### Features * Introduce new methods `parnames` and `posterior_samples` for class 'brmsfit' to extract parameter names and posterior samples for given parameters, respectively. * Introduce new method `hypothesis` for class `brmsfit` allowing to test non-linear hypotheses concerning fixed effects. * Introduce new argument `addition` in function brm to get a more flexible approach in specifying additional information on the response variable (e.g., standard errors for meta-analysis). Alternatively, this information can also be passed to the `formula` argument directly. * Introduce weighted and censored regressions through argument `addition` of function brm. * Introduce new argument `cov.ranef` in the `brm` function allowing for customized covariance structures of random effects thanks to the idea of Boby Mathew. * Introduce new argument `autocor` in function brm allowing for autocorrelation of the response variable. * Introduce new functions `cor.ar`, `cor.ma`, and `cor.arma`, to be used with argument `autocor` for modeling autoregressive, moving-average, and autoregressive-moving-average models. ### Other changes * Amend parametrization of random effects to increase efficiency of the sampling algorithms. * Improve vectorization of sampling statements. 
### Bug fixes * Fix a bug that could cause an error when fitting poisson models while `predict = TRUE`. * Fix a bug that caused an error when sampling only one chain while `silent = TRUE`. # brms 0.2.0 ### Features * New S3 class `brmsfit` to be returned by the `brm` function. * New methods for class `brmsfit`: `summary`, `print`, `plot`, `predict`, `fixef`, `ranef`, `VarCorr`, `nobs`, `ngrps`, and `formula`. * Introduce new argument `silent` in the `brm` function, allowing to suppress most of `Stan`'s intermediate output. * Introduce new families `negbinomial` (negative binomial) and `geometric` to allow for more flexibility in modeling count data. ### Other changes * Amend warning and error messages to make them more informative. * Correct examples in the documentation. * Extend the README file. ### Bug fixes * Fix a bug that caused problems when formulas contained more complicated function calls. * Fix a bug that caused an error when posterior predictives were sampled for family `cumulative`. * Fix a bug that prohibited to use of improper flat priors for parameters that have proper priors by default. # brms 0.1.0 * Initial release version brms/DESCRIPTION0000644000175000017500000000612614146772153013074 0ustar nileshnileshPackage: brms Encoding: UTF-8 Type: Package Title: Bayesian Regression Models using 'Stan' Version: 2.16.3 Date: 2021-11-22 Authors@R: c(person("Paul-Christian", "Bürkner", email = "paul.buerkner@gmail.com", role = c("aut", "cre")), person("Jonah", "Gabry", role = c("ctb")), person("Sebastian", "Weber", role = c("ctb")), person("Andrew", "Johnson", role = c("ctb")), person("Martin", "Modrak", role = c("ctb")), person("Hamada S.", "Badr", role = c("ctb")), person("Frank", "Weber", role = c("ctb")), person("Mattan S.", "Ben-Shachar", role = c("ctb"))) Depends: R (>= 3.5.0), Rcpp (>= 0.12.0), methods Imports: rstan (>= 2.19.2), ggplot2 (>= 2.0.0), loo (>= 2.3.1), posterior (>= 1.0.0), Matrix (>= 1.1.1), mgcv (>= 1.8-13), rstantools (>= 2.1.1), bayesplot (>= 1.5.0), shinystan (>= 2.4.0), bridgesampling (>= 0.3-0), glue (>= 1.3.0), future (>= 1.19.0), matrixStats, nleqslv, nlme, coda, abind, stats, utils, parallel, grDevices, backports Suggests: testthat (>= 0.9.1), emmeans (>= 1.4.2), cmdstanr (>= 0.4.0), projpred (>= 2.0.0), RWiener, rtdists, mice, spdep, mnormt, lme4, MCMCglmm, splines2, ape, arm, statmod, digest, diffobj, R.rsp, gtable, shiny, knitr, rmarkdown Description: Fit Bayesian generalized (non-)linear multivariate multilevel models using 'Stan' for full Bayesian inference. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, count data, survival, response times, ordinal, zero-inflated, hurdle, and even self-defined mixture models all in a multilevel context. Further modeling options include non-linear and smooth terms, auto-correlation structures, censored data, meta-analytic standard errors, and quite a few more. In addition, all parameters of the response distribution can be predicted in order to perform distributional regression. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. Model fit can easily be assessed and compared with posterior predictive checks and leave-one-out cross-validation. References: Bürkner (2017) ; Bürkner (2018) ; Bürkner (2021) ; Carpenter et al. (2017) . 
LazyData: true NeedsCompilation: no License: GPL-2 URL: https://github.com/paul-buerkner/brms, https://discourse.mc-stan.org/ BugReports: https://github.com/paul-buerkner/brms/issues Additional_repositories: https://mc-stan.org/r-packages/ VignetteBuilder: knitr, R.rsp RoxygenNote: 7.1.2 Packaged: 2021-11-22 17:06:55 UTC; paulb Author: Paul-Christian Bürkner [aut, cre], Jonah Gabry [ctb], Sebastian Weber [ctb], Andrew Johnson [ctb], Martin Modrak [ctb], Hamada S. Badr [ctb], Frank Weber [ctb], Mattan S. Ben-Shachar [ctb] Maintainer: Paul-Christian Bürkner Repository: CRAN Date/Publication: 2021-11-22 19:50:02 UTC brms/README.md0000644000175000017500000003563514127555444012655 0ustar nileshnilesh brms Logo[Stan Logo](https://mc-stan.org/) # brms [![Build Status](https://travis-ci.org/paul-buerkner/brms.svg?branch=master)](https://travis-ci.org/paul-buerkner/brms) [![Coverage Status](https://codecov.io/github/paul-buerkner/brms/coverage.svg?branch=master)](https://codecov.io/github/paul-buerkner/brms?branch=master) [![CRAN Version](https://www.r-pkg.org/badges/version/brms)](https://cran.r-project.org/package=brms) [![Downloads](https://cranlogs.r-pkg.org/badges/brms?color=brightgreen)](https://CRAN.R-project.org/package=brms) ## Overview The **brms** package provides an interface to fit Bayesian generalized (non-)linear multivariate multilevel models using Stan, which is a C++ package for performing full Bayesian inference (see ). The formula syntax is very similar to that of the package lme4 to provide a familiar and simple interface for performing regression analyses. A wide range of response distributions are supported, allowing users to fit – among others – linear, robust linear, count data, survival, response times, ordinal, zero-inflated, and even self-defined mixture models all in a multilevel context. Further modeling options include non-linear and smooth terms, auto-correlation structures, censored data, missing value imputation, and quite a few more. In addition, all parameters of the response distribution can be predicted in order to perform distributional regression. Multivariate models (i.e., models with multiple response variables) can be fit, as well. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. Model fit can easily be assessed and compared with posterior predictive checks, cross-validation, and Bayes factors. ## Resources - [Introduction to brms](https://doi.org/10.18637/jss.v080.i01) (Journal of Statistical Software) - [Advanced multilevel modeling with brms](https://journal.r-project.org/archive/2018/RJ-2018-017/index.html) (The R Journal) - [Website](https://paul-buerkner.github.io/brms/) (Website of brms with documentation and vignettes) - [Blog posts](https://paul-buerkner.github.io/blog/brms-blogposts/) (List of blog posts about brms) - [Ask a question](https://discourse.mc-stan.org/) (Stan Forums on Discourse) - [Open an issue](https://github.com/paul-buerkner/brms/issues) (GitHub issues for bug reports and feature requests) ## How to use brms ``` r library(brms) ``` As a simple example, we use poisson regression to model the seizure counts in epileptic patients to investigate whether the treatment (represented by variable `Trt`) can reduce the seizure counts and whether the effect of the treatment varies with the (standardized) baseline number of seizures a person had before treatment (variable `zBase`). 
As we have multiple observations per person, a group-level intercept is incorporated to account for the resulting dependency in the data. ``` r fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) ``` The results (i.e., posterior draws) can be investigated using ``` r summary(fit1) #> Family: poisson #> Links: mu = log #> Formula: count ~ zAge + zBase * Trt + (1 | patient) #> Data: epilepsy (Number of observations: 236) #> Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; #> total post-warmup samples = 4000 #> #> Group-Level Effects: #> ~patient (Number of levels: 59) #> Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS #> sd(Intercept) 0.58 0.07 0.46 0.74 1.00 810 1753 #> #> Population-Level Effects: #> Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS #> Intercept 1.77 0.12 1.53 2.00 1.00 779 1319 #> zAge 0.09 0.09 -0.09 0.26 1.00 684 1071 #> zBase 0.70 0.12 0.46 0.95 1.00 847 1453 #> Trt1 -0.27 0.17 -0.59 0.06 1.00 661 1046 #> zBase:Trt1 0.05 0.16 -0.26 0.37 1.00 993 1624 #> #> Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS #> and Tail_ESS are effective sample size measures, and Rhat is the potential #> scale reduction factor on split chains (at convergence, Rhat = 1). ``` On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and (in case of more than one group-level effect per grouping factor; not displayed here) correlations between group-level effects. On the bottom of the output, population-level effects (i.e. regression coefficients) are displayed. If incorporated, autocorrelation effects and family specific parameters (e.g. the residual standard deviation ‘sigma’ in normal models) are also given. In general, every parameter is summarized using the mean (‘Estimate’) and the standard deviation (‘Est.Error’) of the posterior distribution as well as two-sided 95% credible intervals (‘l-95% CI’ and ‘u-95% CI’) based on quantiles. We see that the coefficient of `Trt` is negative with a zero overlapping 95%-CI. This indicates that, on average, the treatment may reduce seizure counts by some amount but the evidence based on the data and applied model is not very strong and still insufficient by standard decision rules. Further, we find little evidence that the treatment effect varies with the baseline number of seizures. The last two values (‘Eff.Sample’ and ‘Rhat’) provide information on how well the algorithm could estimate the posterior distribution of this parameter. If ‘Rhat’ is considerably greater than 1, the algorithm has not yet converged and it is necessary to run more iterations and / or set stronger priors. To visually investigate the chains as well as the posterior distributions, we can use the `plot` method. If we just want to see results of the regression coefficients of `Trt` and `zBase`, we go for ``` r plot(fit1, variable = c("b_Trt1", "b_zBase")) ``` A more detailed investigation can be performed by running `launch_shinystan(fit1)`. To better understand the relationship of the predictors with the response, I recommend the `conditional_effects` method: ``` r plot(conditional_effects(fit1, effects = "zBase:Trt")) ``` This method uses some prediction functionality behind the scenes, which can also be called directly. Suppose that we want to predict responses (i.e. 
seizure counts) of a person in the treatment group (`Trt = 1`) and in the control group (`Trt = 0`) with average age and average number of previous seizures. Than we can use ``` r newdata <- data.frame(Trt = c(0, 1), zAge = 0, zBase = 0) predict(fit1, newdata = newdata, re_formula = NA) #> Estimate Est.Error Q2.5 Q97.5 #> [1,] 5.8980 2.505627 2 11 #> [2,] 4.5595 2.162320 1 9 ``` We need to set `re_formula = NA` in order not to condition of the group-level effects. While the `predict` method returns predictions of the responses, the `fitted` method returns predictions of the regression line. ``` r fitted(fit1, newdata = newdata, re_formula = NA) #> Estimate Est.Error Q2.5 Q97.5 #> [1,] 5.917144 0.7056695 4.632004 7.387471 #> [2,] 4.529949 0.5360204 3.544085 5.624005 ``` Both methods return the same estimate (up to random error), while the latter has smaller variance, because the uncertainty in the regression line is smaller than the uncertainty in each response. If we want to predict values of the original data, we can just leave the `newdata` argument empty. Suppose, we want to investigate whether there is overdispersion in the model, that is residual variation not accounted for by the response distribution. For this purpose, we include a second group-level intercept that captures possible overdispersion. ``` r fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) ``` We can then go ahead and compare both models via approximate leave-one-out (LOO) cross-validation. ``` r loo(fit1, fit2) #> Output of model 'fit1': #> #> Computed from 4000 by 236 log-likelihood matrix #> #> Estimate SE #> elpd_loo -670.4 36.7 #> p_loo 92.8 14.3 #> looic 1340.8 73.3 #> ------ #> Monte Carlo SE of elpd_loo is NA. #> #> Pareto k diagnostic values: #> Count Pct. Min. n_eff #> (-Inf, 0.5] (good) 214 90.7% 251 #> (0.5, 0.7] (ok) 17 7.2% 80 #> (0.7, 1] (bad) 3 1.3% 81 #> (1, Inf) (very bad) 2 0.8% 6 #> See help('pareto-k-diagnostic') for details. #> #> Output of model 'fit2': #> #> Computed from 4000 by 236 log-likelihood matrix #> #> Estimate SE #> elpd_loo -595.2 14.1 #> p_loo 108.0 7.3 #> looic 1190.4 28.2 #> ------ #> Monte Carlo SE of elpd_loo is NA. #> #> Pareto k diagnostic values: #> Count Pct. Min. n_eff #> (-Inf, 0.5] (good) 82 34.7% 544 #> (0.5, 0.7] (ok) 103 43.6% 153 #> (0.7, 1] (bad) 47 19.9% 22 #> (1, Inf) (very bad) 4 1.7% 7 #> See help('pareto-k-diagnostic') for details. #> #> Model comparisons: #> elpd_diff se_diff #> fit2 0.0 0.0 #> fit1 -75.2 26.9 ``` The `loo` output when comparing models is a little verbose. We first see the individual LOO summaries of the two models and then the comparison between them. Since higher `elpd` (i.e., expected log posterior density) values indicate better fit, we see that the model accounting for overdispersion (i.e., `fit2`) fits substantially better. However, we also see in the individual LOO outputs that there are several problematic observations for which the approximations may have not have been very accurate. To deal with this appropriately, we need to fall back to other methods such as `reloo` or `kfold` but this requires the model to be refit several times which takes too long for the purpose of a quick example. The post-processing methods we have shown above are just the tip of the iceberg. For a full list of methods to apply on fitted model objects, type `methods(class = "brmsfit")`. 
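Beyond the methods shown above, here is a brief, illustrative sketch of a few further post-processing calls applied to `fit1`; output is omitted.

``` r
pp_check(fit1)                # graphical posterior predictive check
hypothesis(fit1, "Trt1 < 0")  # directional hypothesis about the treatment effect
bayes_R2(fit1)                # Bayesian R-squared
methods(class = "brmsfit")    # list all available post-processing methods
```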
## Citing brms and related software Developing and maintaining open source software is an important yet often underappreciated contribution to scientific progress. Thus, whenever you are using open source software (or software in general), please make sure to cite it appropriately so that developers get credit for their work. When using brms, please cite one or more of the following publications: - Bürkner P. C. (2017). brms: An R Package for Bayesian Multilevel Models using Stan. *Journal of Statistical Software*. 80(1), 1-28. doi.org/10.18637/jss.v080.i01 - Bürkner P. C. (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. *The R Journal*. 10(1), 395-411. doi.org/10.32614/RJ-2018-017 As brms is a high-level interface to Stan, please additionally cite Stan: - Carpenter B., Gelman A., Hoffman M. D., Lee D., Goodrich B., Betancourt M., Brubaker M., Guo J., Li P., and Riddell A. (2017). Stan: A probabilistic programming language. *Journal of Statistical Software*. 76(1). 10.18637/jss.v076.i01 Further, brms relies on several other R packages and, of course, on R itself. To find out how to cite R and its packages, use the `citation` function. There are some features of brms which specifically rely on certain packages. The **rstan** package together with **Rcpp** makes Stan conveniently accessible in R. Visualizations and posterior-predictive checks are based on **bayesplot** and **ggplot2**. Approximate leave-one-out cross-validation using `loo` and related methods is done via the **loo** package. Marginal likelihood based methods such as `bayes_factor` are realized by means of the **bridgesampling** package. Splines specified via the `s` and `t2` functions rely on **mgcv**. If you use some of these features, please also consider citing the related packages. ## FAQ ### How do I install brms? To install the latest release version from CRAN use ``` r install.packages("brms") ``` The current developmental version can be downloaded from github via ``` r if (!requireNamespace("remotes")) { install.packages("remotes") } remotes::install_github("paul-buerkner/brms") ``` Because brms is based on Stan, a C++ compiler is required. The program Rtools (available on ) comes with a C++ compiler for Windows. On Mac, you should install Xcode. For further instructions on how to get the compilers running, see the prerequisites section on . ### I am new to brms. Where can I start? Detailed instructions and case studies are given in the package’s extensive vignettes. See `vignette(package = "brms")` for an overview. For documentation on formula syntax, families, and prior distributions see `help("brm")`. ### Where do I ask questions, propose a new feature, or report a bug? Questions can be asked on the [Stan forums](https://discourse.mc-stan.org/) on Discourse. To propose a new feature or report a bug, please open an issue on [GitHub](https://github.com/paul-buerkner/brms). ### How can I extract the generated Stan code? If you have already fitted a model, just apply the `stancode` method on the fitted model object. If you just want to generate the Stan code without any model fitting, use the `make_stancode` function. ### Can I avoid compiling models? When you fit your model for the first time with brms, there is currently no way to avoid compilation. However, if you have already fitted your model and want to run it again, for instance with more draws, you can do this without recompilation by using the `update` method. For more details see `help("update.brmsfit")`. 
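For example (a minimal sketch building on `fit1` from above), additional posterior draws can be obtained without recompilation as long as the Stan code is unchanged:

``` r
# Refit the model with more iterations, reusing the already compiled model.
fit1_more <- update(fit1, iter = 4000, warmup = 1000)
```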
### What is the difference between brms and rstanarm?

The rstanarm package is similar to brms in that it also allows fitting regression models using Stan as the backend for estimation. In contrast to brms, rstanarm comes with precompiled code, which saves the compilation time (and the need for a C++ compiler) when fitting a model. However, as brms generates its Stan code on the fly, it offers much more flexibility in model specification than rstanarm. Also, multilevel models are currently fitted a bit more efficiently in brms. For detailed comparisons of brms with other common R packages implementing multilevel models, see `vignette("brms_multilevel")` and `vignette("brms_overview")`.

brms/man/0000755000175000017500000000000014136566261012134 5ustar nileshnileshbrms/man/R2D2.Rd0000644000175000017500000000256614105230573013073 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{R2D2} \alias{R2D2} \title{R2-D2 Priors in \pkg{brms}} \usage{ R2D2(mean_R2 = 0.5, prec_R2 = 2, cons_D2 = 1, autoscale = TRUE) } \arguments{ \item{mean_R2}{mean of the Beta prior on the coefficient of determination R^2.} \item{prec_R2}{precision of the Beta prior on the coefficient of determination R^2.} \item{cons_D2}{concentration vector of the Dirichlet prior on the variance decomposition parameters.} \item{autoscale}{Logical; indicating whether the R2D2 prior should be scaled using the residual standard deviation \code{sigma} if possible and sensible (defaults to \code{TRUE}). Autoscaling is not applied for distributional parameters or when the model does not contain the parameter \code{sigma}.} } \description{ Function used to set up R2D2 priors for population-level effects in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up the model. } \examples{ set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10)) } \references{ Zhang, Y. D., Naughton, B. P., Bondell, H. D., & Reich, B. J. (2020). Bayesian regression using a prior on the model fit: The R2-D2 shrinkage prior. Journal of the American Statistical Association. \url{https://arxiv.org/pdf/1609.00046.pdf} } \seealso{ \code{\link{set_prior}} } brms/man/brm_multiple.Rd0000644000175000017500000002371314111751667015123 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm_multiple.R \name{brm_multiple} \alias{brm_multiple} \title{Run the same \pkg{brms} model on multiple datasets} \usage{ brm_multiple( formula, data, family = gaussian(), prior = NULL, data2 = NULL, autocor = NULL, cov_ranef = NULL, sample_prior = c("no", "yes", "only"), sparse = NULL, knots = NULL, stanvars = NULL, stan_funs = NULL, silent = 1, recompile = FALSE, combine = TRUE, fit = NA, seed = NA, file = NULL, file_refit = "never", ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to those classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{A \emph{list} of data.frames, each of which will be used to fit a separate model. Alternatively, a \code{mids} object from the \pkg{mice} package.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family.
Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{data2}{A \emph{list} of named lists each of which will be used to fit a separate model. Each of the named lists contains objects representing data which cannot be passed via argument \code{data} (see \code{\link{brm}} for examples). The length of the outer list should match the length of the list passed to the \code{data} argument.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. 
See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{silent}{Verbosity level between \code{0} and \code{2}. If \code{1} (the default), most of the informational messages of compiler and sampler are suppressed. If \code{2}, even more messages are suppressed. The actual sampling progress is still printed. Set \code{refresh = 0} to turn this off as well. If using \code{backend = "rstan"} you can also set \code{open_progress = FALSE} to prevent opening additional progress bars.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled for every imputed data set. Defaults to \code{FALSE}. If \code{NULL}, \code{brm_multiple} tries to figure out internally, if recompilation is necessary, for example because data-dependent priors have changed. Using the default of no recompilation should be fine in most cases.} \item{combine}{Logical; Indicates if the fitted models should be combined into a single fitted model object via \code{\link{combine_models}}. Defaults to \code{TRUE}.} \item{fit}{An instance of S3 class \code{brmsfit_multiple} derived from a previous fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit_multiple}, the compiled model associated with the fitted result is re-used and all arguments modifying the model code or data are ignored. It is not recommended to use this argument directly, but to call the \code{\link[brms:update.brmsfit_multiple]{update}} method, instead.} \item{seed}{The seed for random number generation to make results reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed randomly.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If the file already exists, \code{brm} will load and return the saved model object instead of refitting the model. Unless you specify the \code{file_refit} argument as well, the existing files won't be overwritten, you have to manually remove the file in order to refit and save the model under an existing file name. The file name is stored in the \code{brmsfit} object for later usage.} \item{file_refit}{Modifies when the fit stored via the \code{file} parameter is re-used. Can be set globally for the current \R session via the \code{"brms.file_refit"} option (see \code{\link{options}}). For \code{"never"} (default) the fit is always loaded if it exists and fitting is skipped. For \code{"always"} the model is always refitted. If set to \code{"on_change"}, brms will refit the model if model, data or algorithm as passed to Stan differ from what is stored in the file. This also covers changes in priors, \code{sample_prior}, \code{stanvars}, covariance structure, etc. If you believe there was a false positive, you can use \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. Refit will not be triggered for changes in additional parameters of the fit (e.g., initial values, number of iterations, control arguments, ...). 
A known limitation is that a refit will be triggered if within-chain parallelization is switched on/off.} \item{...}{Further arguments passed to \code{\link{brm}}.} } \value{ If \code{combine = TRUE} a \code{brmsfit_multiple} object, which inherits from class \code{brmsfit} and behaves essentially the same. If \code{combine = FALSE} a list of \code{brmsfit} objects. } \description{ Run the same \pkg{brms} model on multiple datasets and then combine the results into one fitted model object. This is useful in particular for multiple missing value imputation, where the same model is fitted on multiple imputed data sets. Models can be run in parallel using the \pkg{future} package. } \details{ The combined model may issue false positive convergence warnings, as the MCMC chains corresponding to different datasets may not necessarily overlap, even if each of the original models did converge. To find out whether each of the original models converged, investigate \code{fit$rhats}, where \code{fit} denotes the output of \code{brm_multiple}. } \examples{ \dontrun{ library(mice) imp <- mice(nhanes2) # fit the model using mice and lm fit_imp1 <- with(lm(bmi ~ age + hyp + chl), data = imp) summary(pool(fit_imp1)) # fit the model using brms fit_imp2 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) summary(fit_imp2) plot(fit_imp2, pars = "^b_") # investigate convergence of the original models fit_imp2$rhats # use the future package for parallelization library(future) plan(multiprocess) fit_imp3 <- brm_multiple(bmi~age+hyp+chl, data = imp, chains = 1) summary(fit_imp3) } } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/make_conditions.Rd0000644000175000017500000000217313701270367015570 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{make_conditions} \alias{make_conditions} \title{Prepare Fully Crossed Conditions} \usage{ make_conditions(x, vars, ...) } \arguments{ \item{x}{An \R object from which to extract the variables that should be part of the conditions.} \item{vars}{Names of the variables that should be part of the conditions.} \item{...}{Arguments passed to \code{\link{rows2labels}}.} } \value{ A \code{data.frame} where each row indicates a condition. } \description{ This is a helper function to prepare fully crossed conditions primarily for use with the \code{conditions} argument of \code{\link{conditional_effects}}. Automatically creates labels for each row in the \code{cond__} column. } \details{ For factor like variables, all levels are used as conditions. For numeric variables, \code{mean + (-1:1) * SD} are used as conditions. } \examples{ df <- data.frame(x = c("a", "b"), y = rnorm(10)) make_conditions(df, vars = c("x", "y")) } \seealso{ \code{\link{conditional_effects}}, \code{\link{rows2labels}} } brms/man/make_standata.Rd0000644000175000017500000001250614111751667015222 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_standata.R \name{make_standata} \alias{make_standata} \title{Data for \pkg{brms} Models} \usage{ make_standata( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", stanvars = NULL, threads = NULL, knots = NULL, ... 
) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. 
In this case, all parameters must have proper priors.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{...}{Other arguments for internal use.} } \value{ A named list of objects containing the required data to fit a \pkg{brms} model with \pkg{Stan}. } \description{ Generate data for \pkg{brms} models to be passed to \pkg{Stan} } \examples{ sdata1 <- make_standata(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") str(sdata1) sdata2 <- make_standata(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") str(sdata2) } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/draws-brms.Rd0000644000175000017500000000501014111751667014477 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{draws-brms} \alias{draws-brms} \alias{as_draws} \alias{as_draws_matrix} \alias{as_draws_array} \alias{as_draws_df} \alias{as_draws_rvars} \alias{as_draws_list} \alias{as_draws.brmsfit} \alias{as_draws_matrix.brmsfit} \alias{as_draws_array.brmsfit} \alias{as_draws_df.brmsfit} \alias{as_draws_list.brmsfit} \alias{as_draws_rvars.brmsfit} \title{Transform \code{brmsfit} to \code{draws} objects} \usage{ \method{as_draws}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_matrix}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_array}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_df}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_list}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) \method{as_draws_rvars}{brmsfit}(x, variable = NULL, regex = FALSE, inc_warmup = FALSE, ...) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{regex}{Logical; Should variable should be treated as a (vector of) regular expressions? Any variable in \code{x} matching at least one of the regular expressions will be selected. Defaults to \code{FALSE}.} \item{inc_warmup}{Should warmup draws be included? Defaults to \code{FALSE}.} \item{...}{Arguments passed to individual methods (if applicable).} } \description{ Transform a \code{brmsfit} object to a format supported by the \pkg{posterior} package. } \details{ To subset iterations, chains, or draws, use the \code{\link[posterior:subset_draws]{subset_draws}} method after transforming the \code{brmsfit} to a \code{draws} object. 
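For illustration (assuming a fitted model object named \code{fit}, as in the examples below), something like \code{subset_draws(as_draws_df(fit), chain = 1, iteration = 1:500)} would keep only the first 500 post-warmup iterations of the first chain, provided that many iterations are available; \code{subset_draws} is provided by the \pkg{posterior} package.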
} \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # extract posterior draws in an array format (draws_fit <- as_draws_array(fit)) posterior::summarize_draws(draws_fit) # extract only certain variables as_draws_array(fit, variable = "r_patient") as_draws_array(fit, variable = "^b_", regex = TRUE) # extract posterior draws in a random variables format as_draws_rvars(fit) } } \seealso{ \code{\link[posterior:draws]{draws}} \code{\link[posterior:subset_draws]{subset_draws}} } brms/man/posterior_summary.Rd0000644000175000017500000000373214111751667016232 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_summary} \alias{posterior_summary} \alias{posterior_summary.default} \alias{posterior_summary.brmsfit} \title{Summarize Posterior draws} \usage{ posterior_summary(x, ...) \method{posterior_summary}{default}(x, probs = c(0.025, 0.975), robust = FALSE, ...) \method{posterior_summary}{brmsfit}( x, pars = NA, variable = NULL, probs = c(0.025, 0.975), robust = FALSE, ... ) } \arguments{ \item{x}{An \R object.} \item{...}{More arguments passed to or from other methods.} \item{probs}{The percentiles to be computed by the \code{\link[stats:quantile]{quantile}} function.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} } \value{ A matrix where rows indicate variables and columns indicate the summary estimates. } \description{ Summarizes posterior draws based on point estimates (mean or median), estimation errors (SD or MAD) and quantiles. This function mainly exists to retain backwards compatibility. It will eventually be replaced by functions of the \pkg{posterior} package (see examples below). } \examples{ \dontrun{ fit <- brm(time ~ age * sex, data = kidney) posterior_summary(fit) # recommended workflow using posterior library(posterior) draws <- as_draws_array(fit) summarise_draws(draws, default_summary_measures()) } } \seealso{ \code{\link[posterior:summarize_draws]{summarize_draws}} } brms/man/mo.Rd0000644000175000017500000000420314111751667013034 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{mo} \alias{mo} \title{Monotonic Predictors in \pkg{brms} Models} \usage{ mo(x, id = NA) } \arguments{ \item{x}{An integer variable or an ordered factor to be modeled as monotonic.} \item{id}{Optional character string. All monotonic terms with the same \code{id} within one formula will be modeled as having the same simplex (shape) parameter vector. If all monotonic terms of the same predictor have the same \code{id}, the resulting predictions will be conditionally monotonic for all values of interacting covariates (Bürkner & Charpentier, 2020).} } \description{ Specify a monotonic predictor term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model. } \details{ See Bürkner and Charpentier (2020) for the underlying theory. 
For detailed documentation of the formula syntax used for monotonic terms, see \code{help(brmsformula)} as well as \code{vignette("brms_monotonic")}. } \examples{ \dontrun{ # generate some data income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) # fit a simple monotonic model fit1 <- brm(ls ~ mo(income), data = dat) summary(fit1) plot(fit1, N = 6) plot(conditional_effects(fit1), points = TRUE) # model interaction with other variables dat$x <- sample(c("a", "b", "c"), 100, TRUE) fit2 <- brm(ls ~ mo(income)*x, data = dat) summary(fit2) plot(conditional_effects(fit2), points = TRUE) # ensure conditional monotonicity fit3 <- brm(ls ~ mo(income, id = "i")*x, data = dat) summary(fit3) plot(conditional_effects(fit3), points = TRUE) } } \references{ Bürkner P. C. & Charpentier E. (2020). Modeling Monotonic Effects of Ordinal Predictors in Regression Models. British Journal of Mathematical and Statistical Psychology. doi:10.1111/bmsp.12195 } \seealso{ \code{\link{brmsformula}} } brms/man/brmsfamily.Rd0000644000175000017500000003127514105230573014566 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/families.R \name{brmsfamily} \alias{brmsfamily} \alias{student} \alias{bernoulli} \alias{negbinomial} \alias{geometric} \alias{lognormal} \alias{shifted_lognormal} \alias{skew_normal} \alias{exponential} \alias{weibull} \alias{frechet} \alias{gen_extreme_value} \alias{exgaussian} \alias{wiener} \alias{Beta} \alias{dirichlet} \alias{von_mises} \alias{asym_laplace} \alias{cox} \alias{hurdle_poisson} \alias{hurdle_negbinomial} \alias{hurdle_gamma} \alias{hurdle_lognormal} \alias{zero_inflated_beta} \alias{zero_one_inflated_beta} \alias{zero_inflated_poisson} \alias{zero_inflated_negbinomial} \alias{zero_inflated_binomial} \alias{categorical} \alias{multinomial} \alias{cumulative} \alias{sratio} \alias{cratio} \alias{acat} \title{Special Family Functions for \pkg{brms} Models} \usage{ brmsfamily( family, link = NULL, link_sigma = "log", link_shape = "log", link_nu = "logm1", link_phi = "log", link_kappa = "log", link_beta = "log", link_zi = "logit", link_hu = "logit", link_zoi = "logit", link_coi = "logit", link_disc = "log", link_bs = "log", link_ndt = "log", link_bias = "logit", link_xi = "log1p", link_alpha = "identity", link_quantile = "logit", threshold = "flexible", refcat = NULL, bhaz = NULL ) student(link = "identity", link_sigma = "log", link_nu = "logm1") bernoulli(link = "logit") negbinomial(link = "log", link_shape = "log") geometric(link = "log") lognormal(link = "identity", link_sigma = "log") shifted_lognormal(link = "identity", link_sigma = "log", link_ndt = "log") skew_normal(link = "identity", link_sigma = "log", link_alpha = "identity") exponential(link = "log") weibull(link = "log", link_shape = "log") frechet(link = "log", link_nu = "logm1") gen_extreme_value(link = "identity", link_sigma = "log", link_xi = "log1p") exgaussian(link = "identity", link_sigma = "log", link_beta = "log") wiener( link = "identity", link_bs = "log", link_ndt = "log", link_bias = "logit" ) Beta(link = "logit", link_phi = "log") dirichlet(link = "logit", link_phi = "log", refcat = NULL) von_mises(link = "tan_half", link_kappa = "log") asym_laplace(link = "identity", link_sigma = "log", link_quantile = "logit") cox(link = "log", bhaz = NULL) 
hurdle_poisson(link = "log") hurdle_negbinomial(link = "log", link_shape = "log", link_hu = "logit") hurdle_gamma(link = "log", link_shape = "log", link_hu = "logit") hurdle_lognormal(link = "identity", link_sigma = "log", link_hu = "logit") zero_inflated_beta(link = "logit", link_phi = "log", link_zi = "logit") zero_one_inflated_beta( link = "logit", link_phi = "log", link_zoi = "logit", link_coi = "logit" ) zero_inflated_poisson(link = "log", link_zi = "logit") zero_inflated_negbinomial(link = "log", link_shape = "log", link_zi = "logit") zero_inflated_binomial(link = "logit", link_zi = "logit") categorical(link = "logit", refcat = NULL) multinomial(link = "logit", refcat = NULL) cumulative(link = "logit", link_disc = "log", threshold = "flexible") sratio(link = "logit", link_disc = "log", threshold = "flexible") cratio(link = "logit", link_disc = "log", threshold = "flexible") acat(link = "logit", link_disc = "log", threshold = "flexible") } \arguments{ \item{family}{A character string naming the distribution of the response variable be used in the model. Currently, the following families are supported: \code{gaussian}, \code{student}, \code{binomial}, \code{bernoulli}, \code{poisson}, \code{negbinomial}, \code{geometric}, \code{Gamma}, \code{skew_normal}, \code{lognormal}, \code{shifted_lognormal}, \code{exgaussian}, \code{wiener}, \code{inverse.gaussian}, \code{exponential}, \code{weibull}, \code{frechet}, \code{Beta}, \code{dirichlet}, \code{von_mises}, \code{asym_laplace}, \code{gen_extreme_value}, \code{categorical}, \code{multinomial}, \code{cumulative}, \code{cratio}, \code{sratio}, \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, \code{zero_inflated_negbinomial}, \code{zero_inflated_poisson}, and \code{zero_one_inflated_beta}.} \item{link}{A specification for the model link function. This can be a name/expression or character string. See the 'Details' section for more information on link functions supported by each family.} \item{link_sigma}{Link of auxiliary parameter \code{sigma} if being predicted.} \item{link_shape}{Link of auxiliary parameter \code{shape} if being predicted.} \item{link_nu}{Link of auxiliary parameter \code{nu} if being predicted.} \item{link_phi}{Link of auxiliary parameter \code{phi} if being predicted.} \item{link_kappa}{Link of auxiliary parameter \code{kappa} if being predicted.} \item{link_beta}{Link of auxiliary parameter \code{beta} if being predicted.} \item{link_zi}{Link of auxiliary parameter \code{zi} if being predicted.} \item{link_hu}{Link of auxiliary parameter \code{hu} if being predicted.} \item{link_zoi}{Link of auxiliary parameter \code{zoi} if being predicted.} \item{link_coi}{Link of auxiliary parameter \code{coi} if being predicted.} \item{link_disc}{Link of auxiliary parameter \code{disc} if being predicted.} \item{link_bs}{Link of auxiliary parameter \code{bs} if being predicted.} \item{link_ndt}{Link of auxiliary parameter \code{ndt} if being predicted.} \item{link_bias}{Link of auxiliary parameter \code{bias} if being predicted.} \item{link_xi}{Link of auxiliary parameter \code{xi} if being predicted.} \item{link_alpha}{Link of auxiliary parameter \code{alpha} if being predicted.} \item{link_quantile}{Link of auxiliary parameter \code{quantile} if being predicted.} \item{threshold}{A character string indicating the type of thresholds (i.e. intercepts) used in an ordinal model. 
\code{"flexible"} provides the standard unstructured thresholds, \code{"equidistant"} restricts the distance between consecutive thresholds to the same value, and \code{"sum_to_zero"} ensures the thresholds sum to zero.} \item{refcat}{Optional name of the reference response category used in categorical, multinomial, and dirichlet models. If \code{NULL} (the default), the first category is used as the reference. If \code{NA}, all categories will be predicted, which requires strong priors or carefully specified predictor terms in order to lead to an identified model.} \item{bhaz}{Currently for experimental purposes only.} } \description{ Family objects provide a convenient way to specify the details of the models used by many model fitting functions. The family functions presented here are for use with \pkg{brms} only and will **not** work with other model fitting functions such as \code{glm} or \code{glmer}. However, the standard family functions as described in \code{\link[stats:family]{family}} will work with \pkg{brms}. You can also specify custom families for use in \pkg{brms} with the \code{\link{custom_family}} function. } \details{ Below, we list common use cases for the different families. This list is not ment to be exhaustive. \itemize{ \item{Family \code{gaussian} can be used for linear regression.} \item{Family \code{student} can be used for robust linear regression that is less influenced by outliers.} \item{Family \code{skew_normal} can handle skewed responses in linear regression.} \item{Families \code{poisson}, \code{negbinomial}, and \code{geometric} can be used for regression of unbounded count data.} \item{Families \code{bernoulli} and \code{binomial} can be used for binary regression (i.e., most commonly logistic regression).} \item{Families \code{categorical} and \code{multinomial} can be used for multi-logistic regression when there are more than two possible outcomes.} \item{Families \code{cumulative}, \code{cratio} ('continuation ratio'), \code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category') leads to ordinal regression.} \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, \code{lognormal}, \code{frechet}, \code{inverse.gaussian}, and \code{cox} (Cox proportional hazards model) can be used (among others) for time-to-event regression also known as survival regression.} \item{Families \code{weibull}, \code{frechet}, and \code{gen_extreme_value} ('generalized extreme value') allow for modeling extremes.} \item{Families \code{beta} and \code{dirichlet} can be used to model responses representing rates or probabilities.} \item{Family \code{asym_laplace} allows for quantile regression when fixing the auxiliary \code{quantile} parameter to the quantile of interest.} \item{Family \code{exgaussian} ('exponentially modified Gaussian') and \code{shifted_lognormal} are especially suited to model reaction times.} \item{Family \code{wiener} provides an implementation of the Wiener diffusion model. For this family, the main formula predicts the drift parameter 'delta' and all other parameters are modeled as auxiliary parameters (see \code{\link{brmsformula}} for details).} \item{Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, and \code{zero_one_inflated_beta} allow to estimate zero-inflated and hurdle models. 
These models can be very helpful when there are many zeros in the data (or ones in case of one-inflated models) that cannot be explained by the primary distribution of the response.} } Below, we list all possible links for each family. The first link mentioned for each family is the default. \itemize{ \item{Families \code{gaussian}, \code{student}, \code{skew_normal}, \code{exgaussian}, \code{asym_laplace}, and \code{gen_extreme_value} support the links (as names) \code{identity}, \code{log}, \code{inverse}, and \code{softplus}.} \item{Families \code{poisson}, \code{negbinomial}, \code{geometric}, \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{hurdle_poisson}, and \code{hurdle_negbinomial} support \code{log}, \code{identity}, \code{sqrt}, and \code{softplus}.} \item{Families \code{binomial}, \code{bernoulli}, \code{Beta}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, and \code{zero_one_inflated_beta} support \code{logit}, \code{probit}, \code{probit_approx}, \code{cloglog}, \code{cauchit}, and \code{identity}.} \item{Families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat} support \code{logit}, \code{probit}, \code{probit_approx}, \code{cloglog}, and \code{cauchit}.} \item{Families \code{categorical}, \code{multinomial}, and \code{dirichlet} support \code{logit}.} \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, \code{frechet}, and \code{hurdle_gamma} support \code{log}, \code{identity}, \code{inverse}, and \code{softplus}.} \item{Families \code{lognormal} and \code{hurdle_lognormal} support \code{identity} and \code{inverse}.} \item{Family \code{inverse.gaussian} supports \code{1/mu^2}, \code{inverse}, \code{identity}, \code{log}, and \code{softplus}.} \item{Family \code{von_mises} supports \code{tan_half} and \code{identity}.} \item{Family \code{cox} supports \code{log}, \code{identity}, and \code{softplus} for the proportional hazards parameter.} \item{Family \code{wiener} supports \code{identity}, \code{log}, and \code{softplus} for the main parameter which represents the drift rate.} } Please note that when calling the \code{\link[stats:family]{Gamma}} family function of the \pkg{stats} package, the default link will be \code{inverse} instead of \code{log} although the latter is the default in \pkg{brms}. Also, when using the family functions \code{gaussian}, \code{binomial}, \code{poisson}, and \code{Gamma} of the \pkg{stats} package (see \code{\link[stats:family]{family}}), special link functions such as \code{softplus} or \code{cauchit} won't work. In this case, you have to use \code{brmsfamily} to specify the family with corresponding link function. 
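As a minimal illustration (a hypothetical call, not tied to a particular dataset), \code{brmsfamily("poisson", link = "softplus")} sets up a Poisson family with a softplus link, which is not available via \code{stats::poisson}.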
} \examples{ # create a family object (fam1 <- student("log")) # alternatively use the brmsfamily function (fam2 <- brmsfamily("student", "log")) # both leads to the same object identical(fam1, fam2) } \seealso{ \code{\link[brms:brm]{brm}}, \code{\link[stats:family]{family}}, \code{\link{customfamily}} } brms/man/is.brmsprior.Rd0000644000175000017500000000050513661463272015054 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{is.brmsprior} \alias{is.brmsprior} \title{Checks if argument is a \code{brmsprior} object} \usage{ is.brmsprior(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsprior} object } brms/man/predictive_interval.brmsfit.Rd0000644000175000017500000000165613701270367020136 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{predictive_interval.brmsfit} \alias{predictive_interval.brmsfit} \alias{predictive_interval} \title{Predictive Intervals} \usage{ \method{predictive_interval}{brmsfit}(object, prob = 0.9, ...) } \arguments{ \item{object}{An \R object of class \code{brmsfit}.} \item{prob}{A number p (0 < p < 1) indicating the desired probability mass to include in the intervals. Defaults to \code{0.9}.} \item{...}{Further arguments passed to \code{\link{posterior_predict}}.} } \value{ A matrix with 2 columns for the lower and upper bounds of the intervals, respectively, and as many rows as observations being predicted. } \description{ Compute intervals from the posterior predictive distribution. } \examples{ \dontrun{ fit <- brm(count ~ zBase, data = epilepsy, family = poisson()) predictive_interval(fit) } } brms/man/cor_bsts.Rd0000644000175000017500000000162513701270367014241 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_bsts} \alias{cor_bsts} \title{(Defunct) Basic Bayesian Structural Time Series} \usage{ cor_bsts(formula = ~1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} } \description{ The BSTS correlation structure is no longer supported. } \keyword{internal} brms/man/threading.Rd0000644000175000017500000000461014105230573014357 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{threading} \alias{threading} \title{Threading in Stan} \usage{ threading(threads = NULL, grainsize = NULL, static = FALSE) } \arguments{ \item{threads}{Number of threads to use in within-chain parallelization.} \item{grainsize}{Number of observations evaluated together in one chunk on one of the CPUs used for threading. If \code{NULL} (the default), \code{grainsize} is currently chosen as \code{max(100, N / (2 * threads))}, where \code{N} is the number of observations in the data. This default is experimental and may change in the future without prior notice.} \item{static}{Logical. Apply the static (non-adaptive) version of \code{reduce_sum}? 
Defaults to \code{FALSE}. Setting it to \code{TRUE} is required to achieve exact reproducibility of the model results (if the random seed is set as well).} } \value{ A \code{brmsthreads} object which can be passed to the \code{threads} argument of \code{brm} and related functions. } \description{ Use threads for within-chain parallelization in \pkg{Stan} via the \pkg{brms} interface. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means. } \details{ The adaptive scheduling procedure used by \code{reduce_sum} will prevent the results to be exactly reproducible even if you set the random seed. If you need exact reproducibility, you have to set argument \code{static = TRUE} which may reduce efficiency a bit. To ensure that chunks (whose size is defined by \code{grainsize}) require roughly the same amount of computing time, we recommend storing observations in random order in the data. At least, please avoid sorting observations after the response values. This is because the latter often cause variations in the computing time of the pointwise log-likelihood, which makes up a big part of the parallelized code. } \examples{ \dontrun{ # this model just serves as an illustration # threading may not actually speed things up here fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = negbinomial(), chains = 1, threads = threading(2, grainsize = 100), backend = "cmdstanr") summary(fit) } } brms/man/set_prior.Rd0000644000175000017500000004466314105230573014434 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{set_prior} \alias{set_prior} \alias{brmsprior} \alias{brmsprior-class} \alias{prior} \alias{prior_} \alias{prior_string} \alias{empty_prior} \title{Prior Definitions for \pkg{brms} Models} \usage{ set_prior( prior, class = "b", coef = "", group = "", resp = "", dpar = "", nlpar = "", lb = NA, ub = NA, check = TRUE ) prior(prior, ...) prior_(prior, ...) prior_string(prior, ...) empty_prior() } \arguments{ \item{prior}{A character string defining a distribution in \pkg{Stan} language} \item{class}{The parameter class. Defaults to \code{"b"} (i.e. population-level effects). See 'Details' for other valid parameter classes.} \item{coef}{Name of the coefficient within the parameter class.} \item{group}{Grouping factor for group-level parameters.} \item{resp}{Name of the response variable. Only used in multivariate models.} \item{dpar}{Name of a distributional parameter. Only used in distributional models.} \item{nlpar}{Name of a non-linear parameter. Only used in non-linear models.} \item{lb}{Lower bound for parameter restriction. Currently only allowed for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction.} \item{ub}{Upper bound for parameter restriction. Currently only allowed for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction.} \item{check}{Logical; Indicates whether priors should be checked for validity (as far as possible). Defaults to \code{TRUE}. If \code{FALSE}, \code{prior} is passed to the Stan code as is, and all other arguments are ignored.} \item{...}{Arguments passed to \code{set_prior}.} } \value{ An object of class \code{brmsprior} to be used in the \code{prior} argument of \code{\link{brm}}. } \description{ Define priors for specific parameters or classes of parameters. 
} \details{ \code{set_prior} is used to define prior distributions for parameters in \pkg{brms} models. The functions \code{prior}, \code{prior_}, and \code{prior_string} are aliases of \code{set_prior}, each allowing for a different kind of argument specification. \code{prior} allows specifying arguments as expressions without quotation marks using non-standard evaluation. \code{prior_} allows specifying arguments as one-sided formulas or wrapped in \code{quote}. \code{prior_string} allows specifying arguments as strings just as \code{set_prior} itself. Below, we explain its usage and list some common prior distributions for parameters. A complete overview of possible prior distributions is given in the Stan Reference Manual available at \url{https://mc-stan.org/}. To combine multiple priors, use \code{c(...)} or the \code{+} operator (see 'Examples'). \pkg{brms} does not check if the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is parsed to \code{C++} and returns an error if they are not. This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model. Below, we list the types of parameters in \pkg{brms} models for which the user can specify prior distributions. 1. Population-level ('fixed') effects Every population-level effect has its own regression parameter named \code{b_<coef>}, where \code{<coef>} represents the name of the corresponding population-level effect. Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2} (i.e., \code{y ~ x1 + x2} in formula syntax). Then, \code{x1} and \code{x2} have regression parameters \code{b_x1} and \code{b_x2}, respectively. The default prior for population-level effects (including monotonic and category-specific effects) is an improper flat prior over the reals. Other common options are normal priors or student-t priors. If we want to have a normal prior with mean 0 and standard deviation 5 for \code{x1}, and a unit student-t prior with 10 degrees of freedom for \code{x2}, we can specify this via \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr \code{set_prior("student_t(10, 0, 1)", class = "b", coef = "x2")}. To put the same prior on all population-level effects at once, we may write as a shortcut \code{set_prior("<prior>", class = "b")}. This also leads to faster sampling, because priors can be vectorized in this case. Both ways of defining priors can be combined using, for instance, \code{set_prior("normal(0, 2)", class = "b")} and \cr \code{set_prior("normal(0, 10)", class = "b", coef = "x1")} at the same time. This will set a \code{normal(0, 10)} prior on the effect of \code{x1} and a \code{normal(0, 2)} prior on all other population-level effects. However, this will break vectorization and may slow down the sampling procedure a bit. In case of the default intercept parameterization (discussed in the 'Details' section of \code{\link{brmsformula}}), general priors on class \code{"b"} will \emph{not} affect the intercept. Instead, the intercept has its own parameter class named \code{"Intercept"} and priors can thus be specified via \code{set_prior("<prior>", class = "Intercept")}. Setting a prior on the intercept will not break vectorization of the other population-level effects. 
Note that technically, this prior is set on an intercept that results when internally centering all population-level predictors around zero to improve sampling efficiency. On this centered intercept, specifying a prior is actually much easier and intuitive than on the original intercept, since the former represents the expected response value when all predictors are at their means. To treat the intercept as an ordinary population-level effect and avoid the centering parameterization, use \code{0 + Intercept} on the right-hand side of the model formula. A special shrinkage prior to be applied on population-level effects is the (regularized) horseshoe prior and related priors. See \code{\link{horseshoe}} for details. Another shrinkage prior is the so-called lasso prior. See \code{\link{lasso}} for details. In non-linear models, population-level effects are defined separately for each non-linear parameter. Accordingly, it is necessary to specify the non-linear parameter in \code{set_prior} so that priors we can be assigned correctly. If, for instance, \code{alpha} is the parameter and \code{x} the predictor for which we want to define the prior, we can write \code{set_prior("", coef = "x", nlpar = "alpha")}. As a shortcut we can use \code{set_prior("", nlpar = "alpha")} to set the same prior on all population-level effects of \code{alpha} at once. If desired, population-level effects can be restricted to fall only within a certain interval using the \code{lb} and \code{ub} arguments of \code{set_prior}. This is often required when defining priors that are not defined everywhere on the real line, such as uniform or gamma priors. When defining a \code{uniform(2,4)} prior, you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}. When using a prior that is defined on the positive reals only (such as a gamma prior) set \code{lb = 0}. In most situations, it is not useful to restrict population-level parameters through bounded priors (non-linear models are an important exception), but if you really want to this is the way to go. 2. Standard deviations of group-level ('random') effects Each group-level effect of each grouping factor has a standard deviation named \code{sd__}. Consider, for instance, the formula \code{y ~ x1 + x2 + (1 + x1 | g)}. We see that the intercept as well as \code{x1} are group-level effects nested in the grouping factor \code{g}. The corresponding standard deviation parameters are named as \code{sd_g_Intercept} and \code{sd_g_x1} respectively. These parameters are restricted to be non-negative and, by default, have a half student-t prior with 3 degrees of freedom and a scale parameter that depends on the standard deviation of the response after applying the link function. Minimally, the scale parameter is 2.5. This prior is used (a) to be only weakly informative in order to influence results as few as possible, while (b) providing at least some regularization to considerably improve convergence and sampling efficiency. To define a prior distribution only for standard deviations of a specific grouping factor, use \cr \code{set_prior("", class = "sd", group = "")}. To define a prior distribution only for a specific standard deviation of a specific grouping factor, you may write \cr \code{set_prior("", class = "sd", group = "", coef = "")}. Recommendations on useful prior distributions for standard deviations are given in Gelman (2006), but note that he is no longer recommending uniform priors, anymore. 
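For illustration (assuming a grouping factor named \code{g} in the model), one could write, e.g., \code{set_prior("exponential(2)", class = "sd", group = "g")} to place an exponential prior on all standard deviations of grouping factor \code{g}, or additionally set \code{coef = "Intercept"} to target only the group-level intercepts.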
\cr When defining priors on group-level parameters in non-linear models, please make sure to specify the corresponding non-linear parameter through the \code{nlpar} argument in the same way as for population-level effects. 3. Correlations of group-level ('random') effects If there is more than one group-level effect per grouping factor, the correlations between those effects have to be estimated. The prior \code{lkj_corr_cholesky(eta)} or in short \code{lkj(eta)} with \code{eta > 0} is essentially the only prior for (Cholesky factors) of correlation matrices. If \code{eta = 1} (the default) all correlations matrices are equally likely a priori. If \code{eta > 1}, extreme correlations become less likely, whereas \code{0 < eta < 1} results in higher probabilities for extreme correlations. Correlation matrix parameters in \code{brms} models are named as \code{cor_}, (e.g., \code{cor_g} if \code{g} is the grouping factor). To set the same prior on every correlation matrix, use for instance \code{set_prior("lkj(2)", class = "cor")}. Internally, the priors are transformed to be put on the Cholesky factors of the correlation matrices to improve efficiency and numerical stability. The corresponding parameter class of the Cholesky factors is \code{L}, but it is not recommended to specify priors for this parameter class directly. 4. Splines Splines are implemented in \pkg{brms} using the 'random effects' formulation as explained in \code{\link[mgcv:gamm]{gamm}}). Thus, each spline has its corresponding standard deviations modeling the variability within this term. In \pkg{brms}, this parameter class is called \code{sds} and priors can be specified via \code{set_prior("", class = "sds", coef = "")}. The default prior is the same as for standard deviations of group-level effects. 5. Gaussian processes Gaussian processes as currently implemented in \pkg{brms} have two parameters, the standard deviation parameter \code{sdgp}, and characteristic length-scale parameter \code{lscale} (see \code{\link{gp}} for more details). The default prior of \code{sdgp} is the same as for standard deviations of group-level effects. The default prior of \code{lscale} is an informative inverse-gamma prior specifically tuned to the covariates of the Gaussian process (for more details see \url{https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html}). This tuned prior may be overly informative in some cases, so please consider other priors as well to make sure inference is robust to the prior specification. If tuning fails, a half-normal prior is used instead. 6. Autocorrelation parameters The autocorrelation parameters currently implemented are named \code{ar} (autoregression), \code{ma} (moving average), \code{arr} (autoregression of the response), \code{car} (spatial conditional autoregression), as well as \code{lagsar} and \code{errorsar} (Spatial simultaneous autoregression). Priors can be defined by \code{set_prior("", class = "ar")} for \code{ar} and similar for other autocorrelation parameters. By default, \code{ar} and \code{ma} are bounded between \code{-1} and \code{1}, \code{car}, \code{lagsar}, and \code{errorsar} are bounded between \code{0}, and \code{1}, and \code{arr} is unbounded (you may change this by using the arguments \code{lb} and \code{ub}). The default prior is flat over the definition area. 7. 
Distance parameters of monotonic effects As explained in the details section of \code{\link{brm}}, monotonic effects make use of a special parameter vector to estimate the 'normalized distances' between consecutive predictor categories. This is realized in \pkg{Stan} using the \code{simplex} parameter type. This class is named \code{"simo"} (short for simplex monotonic) in \pkg{brms}. The only valid prior for simplex parameters is the dirichlet prior, which accepts a vector of length \code{K - 1} (K = number of predictor categories) as input defining the 'concentration' of the distribution. Explaining the dirichlet prior is beyond the scope of this documentation, but we want to describe how to define this prior syntactically correct. If a predictor \code{x} with \code{K} categories is modeled as monotonic, we can define a prior on its corresponding simplex via \cr \code{prior(dirichlet(), class = simo, coef = mox1)}. The \code{1} in the end of \code{coef} indicates that this is the first simplex in this term. If interactions between multiple monotonic variables are modeled, multiple simplexes per term are required. For \code{}, we can put in any \code{R} expression defining a vector of length \code{K - 1}. The default is a uniform prior (i.e. \code{ = rep(1, K-1)}) over all simplexes of the respective dimension. 8. Parameters for specific families Some families need additional parameters to be estimated. Families \code{gaussian}, \code{student}, \code{skew_normal}, \code{lognormal}, and \code{gen_extreme_value} need the parameter \code{sigma} to account for the residual standard deviation. By default, \code{sigma} has a half student-t prior that scales in the same way as the group-level standard deviations. Further, family \code{student} needs the parameter \code{nu} representing the degrees of freedom of students-t distribution. By default, \code{nu} has prior \code{gamma(2, 0.1)} and a fixed lower bound of \code{1}. Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and \code{negbinomial} need a \code{shape} parameter that has a \code{gamma(0.01, 0.01)} prior by default. For families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}, and only if \code{threshold = "equidistant"}, the parameter \code{delta} is used to model the distance between two adjacent thresholds. By default, \code{delta} has an improper flat prior over the reals. The \code{von_mises} family needs the parameter \code{kappa}, representing the concentration parameter. By default, \code{kappa} has prior \code{gamma(2, 0.01)}. \cr Every family specific parameter has its own prior class, so that \code{set_prior("", class = "")} is the right way to go. All of these priors are chosen to be weakly informative, having only minimal influence on the estimations, while improving convergence and sampling efficiency. Fixing parameters to constants is possible by using the \code{constant} function, for example, \code{constant(1)} to fix a parameter to 1. Broadcasting to vectors and matrices is done automatically. Often, it may not be immediately clear, which parameters are present in the model. To get a full list of parameters and parameter classes for which priors can be specified (depending on the model) use function \code{\link{get_prior}}. } \section{Functions}{ \itemize{ \item \code{prior}: Alias of \code{set_prior} allowing to specify arguments as expressions without quotation marks. 
\item \code{prior_}: Alias of \code{set_prior} allowing arguments to be specified as one-sided formulas or wrapped in \code{quote}. \item \code{prior_string}: Alias of \code{set_prior} allowing arguments to be specified as strings. \item \code{empty_prior}: Create an empty \code{brmsprior} object. }} \examples{ ## use alias functions (prior1 <- prior(cauchy(0, 1), class = sd)) (prior2 <- prior_(~cauchy(0, 1), class = ~sd)) (prior3 <- prior_string("cauchy(0, 1)", class = "sd")) identical(prior1, prior2) identical(prior1, prior3) # check which parameters can have priors get_prior(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative()) # define some priors bprior <- c(prior_string("normal(0,10)", class = "b"), prior(normal(1,2), class = b, coef = treat), prior_(~cauchy(0,2), class = ~sd, group = ~subject, coef = ~Intercept)) # verify that the priors indeed found their way into Stan's model code make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative(), prior = bprior) # use the horseshoe prior to model sparsity in regression coefficients make_stancode(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson(), prior = set_prior("horseshoe(3)")) # fix certain priors to constants bprior <- prior(constant(1), class = "b") + prior(constant(2), class = "b", coef = "zBase") + prior(constant(0.5), class = "sd") make_stancode(count ~ zAge + zBase + (1 | patient), data = epilepsy, prior = bprior) # pass priors to Stan without checking prior <- prior_string("target += normal_lpdf(b[1] | 0, 1)", check = FALSE) make_stancode(count ~ Trt, data = epilepsy, prior = prior) } \references{ Gelman A. (2006). Prior distributions for variance parameters in hierarchical models. Bayesian Analysis, 1(3), 515 -- 534. } \seealso{ \code{\link{get_prior}} } brms/man/residuals.brmsfit.Rd0000644000175000017500000000751414111751667016071 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictive_error.R \name{residuals.brmsfit} \alias{residuals.brmsfit} \title{Posterior Draws of Residuals/Predictive Errors} \usage{ \method{residuals}{brmsfit}( object, newdata = NULL, re_formula = NULL, method = "posterior_epred", type = c("ordinary", "pearson"), resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{method}{Method used to obtain predictions. Either \code{"posterior_epred"} (the default) or \code{"posterior_predict"}. Using \code{"posterior_predict"} is recommended but \code{"posterior_epred"} is the current default for reasons of backwards compatibility.} \item{type}{The type of the residuals, either \code{"ordinary"} or \code{"pearson"}. More information is provided under 'Details'.} \item{resp}{Optional names of response variables. 
If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}..} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predictive error/residual draws. If \code{summary = FALSE} the output resembles those of \code{\link{predictive_error.brmsfit}}. If \code{summary = TRUE} the output is an N x E matrix, where N is the number of observations and E denotes the summary statistics computed from the draws. } \description{ This method is an alias of \code{\link{predictive_error.brmsfit}} with additional arguments for obtaining summaries of the computed draws. } \details{ Residuals of type \code{'ordinary'} are of the form \eqn{R = Y - Yrep}, where \eqn{Y} is the observed and \eqn{Yrep} is the predicted response. Residuals of type \code{pearson} are of the form \eqn{R = (Y - Yrep) / SD(Yrep)}, where \eqn{SD(Yrep)} is an estimate of the standard deviation of \eqn{Yrep}. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, cores = 2) ## extract residuals/predictive errors res <- residuals(fit) head(res) } } brms/man/MultiStudentT.Rd0000644000175000017500000000234114111751667015207 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{MultiStudentT} \alias{MultiStudentT} \alias{dmulti_student_t} \alias{rmulti_student_t} \title{The Multivariate Student-t Distribution} \usage{ dmulti_student_t(x, df, mu, Sigma, log = FALSE, check = FALSE) rmulti_student_t(n, df, mu, Sigma, check = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{df}{Vector of degrees of freedom.} \item{mu}{Location vector with length equal to the number of dimensions.} \item{Sigma}{Covariance matrix.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{check}{Logical; Indicates whether several input checks should be performed. Defaults to \code{FALSE} to improve efficiency.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random generation for the multivariate Student-t distribution with location vector \code{mu}, covariance matrix \code{Sigma}, and degrees of freedom \code{df}. 
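As a minimal sketch of usage (with made-up inputs), \code{rmulti_student_t(5, df = 3, mu = c(0, 0), Sigma = diag(2))} draws five bivariate samples, and \code{dmulti_student_t(c(0, 0), df = 3, mu = c(0, 0), Sigma = diag(2))} evaluates the corresponding density at the origin.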
} \details{ See the Stan user's manual \url{https://mc-stan.org/documentation/} for details on the parameterization. } brms/man/cor_car.Rd0000644000175000017500000000466314105230573014032 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_car} \alias{cor_car} \alias{cor_icar} \title{(Deprecated) Spatial conditional autoregressive (CAR) structures} \usage{ cor_car(W, formula = ~1, type = "escar") cor_icar(W, formula = ~1) } \arguments{ \item{W}{Adjacency matrix of locations. All non-zero entries are treated as if the two locations are adjacent. If \code{formula} contains a grouping factor, the row names of \code{W} have to match the levels of the grouping factor.} \item{formula}{An optional one-sided formula of the form \code{~ 1 | g}, where \code{g} is a grouping factor mapping observations to spatial locations. If not specified, each observation is treated as a separate location. It is recommended to always specify a grouping factor to allow for handling of new data in post-processing methods.} \item{type}{Type of the CAR structure. Currently implemented are \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. More information is provided in the 'Details' section.} } \description{ These functions are deprecated. Please see \code{\link{car}} for the new syntax. These functions are constructors for the \code{cor_car} class implementing spatial conditional autoregressive structures. } \details{ The \code{escar} and \code{esicar} types are implemented based on the case study of Max Joseph (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and \code{bym2} types are implemented based on the case study of Mitzi Morris (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). } \examples{ \dontrun{ # generate some spatial data east <- north <- 1:10 Grid <- expand.grid(east, north) K <- nrow(Grid) # set up distance and neighbourhood matrices distance <- as.matrix(dist(Grid)) W <- array(0, c(K, K)) W[distance == 1] <- 1 # generate the covariates and response data x1 <- rnorm(K) x2 <- rnorm(K) theta <- rnorm(K, sd = 0.05) phi <- rmulti_normal( 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) ) eta <- x1 + x2 + phi prob <- exp(eta) / (1 + exp(eta)) size <- rep(50, K) y <- rbinom(n = K, size = size, prob = prob) dat <- data.frame(y, size, x1, x2) # fit a CAR model fit <- brm(y | trials(size) ~ x1 + x2, data = dat, family = binomial(), autocor = cor_car(W)) summary(fit) } } brms/man/draws-index-brms.Rd0000644000175000017500000000163414111751667015614 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{draws-index-brms} \alias{draws-index-brms} \alias{variables} \alias{nvariables} \alias{niterations} \alias{nchains} \alias{ndraws} \alias{Index} \alias{variables,} \alias{iterations,} \alias{chains,} \alias{and} \alias{draws.} \alias{variables.brmsfit} \alias{nvariables.brmsfit} \alias{niterations.brmsfit} \alias{nchains.brmsfit} \alias{ndraws.brmsfit} \title{Index \code{brmsfit} objects} \usage{ \method{variables}{brmsfit}(x, ...) \method{nvariables}{brmsfit}(x, ...) 
\method{niterations}{brmsfit}(x) \method{nchains}{brmsfit}(x) \method{ndraws}{brmsfit}(x) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{...}{Arguments passed to individual methods (if applicable).} } \description{ Index \code{brmsfit} objects } brms/man/AsymLaplace.Rd0000644000175000017500000000276614111751667014630 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{AsymLaplace} \alias{AsymLaplace} \alias{dasym_laplace} \alias{pasym_laplace} \alias{qasym_laplace} \alias{rasym_laplace} \title{The Asymmetric Laplace Distribution} \usage{ dasym_laplace(x, mu = 0, sigma = 1, quantile = 0.5, log = FALSE) pasym_laplace( q, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE ) qasym_laplace( p, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE ) rasym_laplace(n, mu = 0, sigma = 1, quantile = 0.5) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{sigma}{Vector of scales.} \item{quantile}{Asymmetry parameter corresponding to quantiles in quantile regression (hence the name).} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the asymmetric Laplace distribution with location \code{mu}, scale \code{sigma} and asymmetry parameter \code{quantile}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/loo_model_weights.brmsfit.Rd0000644000175000017500000000251413701270367017571 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo_model_weights.brmsfit} \alias{loo_model_weights.brmsfit} \alias{loo_model_weights} \title{Model averaging via stacking or pseudo-BMA weighting.} \usage{ \method{loo_model_weights}{brmsfit}(x, ..., model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ A named vector of model weights. } \description{ Compute model weights for \code{brmsfit} objects via stacking or pseudo-BMA weighting. For more details, see \code{\link[loo:loo_model_weights]{loo::loo_model_weights}}. 
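As a minimal sketch (assuming two already fitted models \code{fit1} and \code{fit2}, as in the example below), \code{loo_model_weights(fit1, fit2)} returns a named vector of model weights (stacking weights by default), and the optional \code{model_names} argument, e.g. \code{model_names = c("m1", "m2")}, merely overrides the automatically derived names.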
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler, family = "gaussian") # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "gaussian") loo_model_weights(fit1, fit2) } } brms/man/update.brmsfit.Rd0000644000175000017500000000324213701270367015347 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update.R \name{update.brmsfit} \alias{update.brmsfit} \title{Update \pkg{brms} models} \usage{ \method{update}{brmsfit}(object, formula., newdata = NULL, recompile = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{formula.}{Changes to the formula; for details see \code{\link{update.formula}} and \code{\link{brmsformula}}.} \item{newdata}{Optional \code{data.frame} to update the model with new data. Data-dependent default priors will not be updated automatically.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. If \code{NULL} (the default), \code{update} tries to figure out internally, if recompilation is necessary. Setting it to \code{FALSE} will cause all Stan code changing arguments to be ignored.} \item{...}{Other arguments passed to \code{\link{brm}}.} } \description{ This method allows to update an existing \code{brmsfit} object. } \examples{ \dontrun{ fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = gaussian("log")) summary(fit1) ## remove effects of 'disease' fit2 <- update(fit1, formula. = ~ . - disease) summary(fit2) ## remove the group specific term of 'patient' and ## change the data (just take a subset in this example) fit3 <- update(fit1, formula. = ~ . - (1|patient), newdata = kidney[1:38, ]) summary(fit3) ## use another family and add population-level priors fit4 <- update(fit1, family = weibull(), inits = "0", prior = set_prior("normal(0,5)")) summary(fit4) } } brms/man/conditional_smooths.brmsfit.Rd0000644000175000017500000001016614111751667020152 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_smooths.R \name{conditional_smooths.brmsfit} \alias{conditional_smooths.brmsfit} \alias{marginal_smooths} \alias{marginal_smooths.brmsfit} \alias{conditional_smooths} \title{Display Smooth Terms} \usage{ \method{conditional_smooths}{brmsfit}( x, smooths = NULL, int_conditions = NULL, prob = 0.95, spaghetti = FALSE, resolution = 100, too_far = 0, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, probs = NULL, ... ) conditional_smooths(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{smooths}{Optional character vector of smooth terms to display. If \code{NULL} (the default) all smooth terms are shown.} \item{int_conditions}{An optional named \code{list} whose elements are vectors of values of the variables specified in \code{effects}. At these values, predictions are evaluated. The names of \code{int_conditions} have to match the variable names exactly. Additionally, the elements of the vectors may be named themselves, in which case their names appear as labels for the conditions in the plots. Instead of vectors, functions returning vectors may be passed and are applied on the original values of the corresponding variable. 
If \code{NULL} (the default), predictions are evaluated at the \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at all categories for factor-like predictors.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{spaghetti}{Logical. Indicates if predictions should be visualized via spaghetti plots. Only applied for numeric predictors. If \code{TRUE}, it is recommended to set argument \code{ndraws} to a relatively small value (e.g., \code{100}) in order to reduce computation time.} \item{resolution}{Number of support points used to generate the plots. Higher resolution leads to smoother plots. Defaults to \code{100}. If \code{surface} is \code{TRUE}, this implies \code{10000} support points for interaction terms, so it might be necessary to reduce \code{resolution} when only few RAM is available.} \item{too_far}{Positive number. For surface plots only: Grid points that are too far away from the actual data points can be excluded from the plot. \code{too_far} determines what is too far. The grid is scaled into the unit square and then grid points more than \code{too_far} from the predictor variables are excluded. By default, all grid points are used. Ignored for non-surface plots.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{probs}{(Deprecated) The quantiles to be used in the computation of uncertainty intervals. Please use argument \code{prob} instead.} \item{...}{Currently ignored.} } \value{ For the \code{brmsfit} method, an object of class \code{brms_conditional_effects}. See \code{\link{conditional_effects}} for more details and documentation of the related plotting function. } \description{ Display smooth \code{s} and \code{t2} terms of models fitted with \pkg{brms}. } \details{ Two-dimensional smooth terms will be visualized using either contour or raster plots. } \examples{ \dontrun{ set.seed(0) dat <- mgcv::gamSim(1, n = 200, scale = 2) fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) # show all smooth terms plot(conditional_smooths(fit), rug = TRUE, ask = FALSE) # show only the smooth term s(x2) plot(conditional_smooths(fit, smooths = "s(x2)"), ask = FALSE) # fit and plot a two-dimensional smooth term fit2 <- brm(y ~ t2(x0, x2), data = dat) ms <- conditional_smooths(fit2) plot(ms, stype = "contour") plot(ms, stype = "raster") } } brms/man/expp1.Rd0000644000175000017500000000044413661463272013462 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{expp1} \alias{expp1} \title{Exponential function plus one.} \usage{ expp1(x) } \arguments{ \item{x}{A numeric or complex vector.} } \description{ Computes \code{exp(x) + 1}. 
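For example, \code{expp1(0)} returns \code{2} and \code{expp1(log(2))} returns \code{3}.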
} brms/man/predictive_error.brmsfit.Rd0000644000175000017500000000464514111751667017447 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predictive_error.R \name{predictive_error.brmsfit} \alias{predictive_error.brmsfit} \alias{predictive_error} \title{Posterior Draws of Predictive Errors} \usage{ \method{predictive_error}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An S x N \code{array} of predictive error draws, where S is the number of posterior draws and N is the number of observations. } \description{ Compute posterior draws of predictive errors, that is, observed minus predicted responses. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, cores = 2) ## extract predictive errors pe <- predictive_error(fit) str(pe) } } brms/man/as.data.frame.brmsfit.Rd0000644000175000017500000000342514111751667016477 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior.R \name{as.data.frame.brmsfit} \alias{as.data.frame.brmsfit} \alias{as.matrix.brmsfit} \alias{as.array.brmsfit} \title{Extract Posterior Draws} \usage{ \method{as.data.frame}{brmsfit}( x, row.names = NULL, optional = TRUE, pars = NA, variable = NULL, draw = NULL, subset = NULL, ... ) \method{as.matrix}{brmsfit}(x, pars = NA, variable = NULL, draw = NULL, subset = NULL, ...) \method{as.array}{brmsfit}(x, pars = NA, variable = NULL, draw = NULL, subset = NULL, ...) } \arguments{ \item{x}{A \code{brmsfit} object or another \R object for which the methods are defined.} \item{row.names, optional}{Unused and only added for consistency with the \code{\link[base:as.data.frame]{as.data.frame}} generic.} \item{pars}{Deprecated alias of \code{variable}. 
For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{draw}{The draw indices to be select. Subsetting draw indices will lead to an automatic merging of chains.} \item{subset}{Deprecated alias of \code{draw}.} \item{...}{Further arguments to be passed to the corresponding \code{\link[brms:draws-brms]{as_draws_*}} methods as well as to \code{\link[posterior:subset_draws]{subset_draws}}.} } \value{ A data.frame, matrix, or array containing the posterior draws. } \description{ Extract posterior draws in conventional formats as data.frames, matrices, or arrays. } \seealso{ \code{\link[brms:draws-brms]{as_draws}}, \code{\link[posterior:subset_draws]{subset_draws}} } brms/man/me.Rd0000644000175000017500000000322314111751667013023 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{me} \alias{me} \title{Predictors with Measurement Error in \pkg{brms} Models} \usage{ me(x, sdx, gr = NULL) } \arguments{ \item{x}{The variable measured with error.} \item{sdx}{Known measurement error of \code{x} treated as standard deviation.} \item{gr}{Optional grouping factor to specify which values of \code{x} correspond to the same value of the latent variable. If \code{NULL} (the default) each observation will have its own value of the latent variable.} } \description{ (Soft deprecated) Specify predictors with measurement error. The function does not evaluate its arguments -- it exists purely to help set up a model. } \details{ For detailed documentation see \code{help(brmsformula)}. \code{me} terms are soft deprecated in favor of the more general and consistent \code{\link{mi}} terms. By default, latent noise-free variables are assumed to be correlated. To change that, add \code{set_mecor(FALSE)} to your model formula object (see examples). } \examples{ \dontrun{ # sample some data N <- 100 dat <- data.frame( y = rnorm(N), x1 = rnorm(N), x2 = rnorm(N), sdx = abs(rnorm(N, 1)) ) # fit a simple error-in-variables model fit1 <- brm(y ~ me(x1, sdx) + me(x2, sdx), data = dat, save_pars = save_pars(latent = TRUE)) summary(fit1) # turn off modeling of correlations bform <- bf(y ~ me(x1, sdx) + me(x2, sdx)) + set_mecor(FALSE) fit2 <- brm(bform, data = dat, save_pars = save_pars(latent = TRUE)) summary(fit2) } } \seealso{ \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/pairs.brmsfit.Rd0000644000175000017500000000316114111751667015206 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{pairs.brmsfit} \alias{pairs.brmsfit} \title{Create a matrix of output plots from a \code{brmsfit} object} \usage{ \method{pairs}{brmsfit}(x, pars = NA, variable = NULL, regex = FALSE, fixed = FALSE, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. 
Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{...}{Further arguments to be passed to \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}.} } \description{ A \code{\link[graphics:pairs]{pairs}} method that is customized for MCMC output. } \details{ For a detailed description see \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|visit), data = epilepsy, family = "poisson") pairs(fit, variable = variables(fit)[1:3]) pairs(fit, variable = "^sd_", regex = TRUE) } } brms/man/expose_functions.brmsfit.Rd0000644000175000017500000000171214050266727017463 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{expose_functions.brmsfit} \alias{expose_functions.brmsfit} \alias{expose_functions} \title{Expose user-defined \pkg{Stan} functions} \usage{ \method{expose_functions}{brmsfit}(x, vectorize = FALSE, env = globalenv(), ...) expose_functions(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{vectorize}{Logical; Indicates if the exposed functions should be vectorized via \code{\link{Vectorize}}. Defaults to \code{FALSE}.} \item{env}{Environment where the functions should be made available. Defaults to the global environment.} \item{...}{Further arguments passed to \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}.} } \description{ Export user-defined \pkg{Stan} function and optionally vectorize them. For more details see \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. } brms/man/Frechet.Rd0000644000175000017500000000243514111751667014006 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Frechet} \alias{Frechet} \alias{dfrechet} \alias{pfrechet} \alias{qfrechet} \alias{rfrechet} \title{The Frechet Distribution} \usage{ dfrechet(x, loc = 0, scale = 1, shape = 1, log = FALSE) pfrechet(q, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) qfrechet(p, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) rfrechet(n, loc = 0, scale = 1, shape = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{loc}{Vector of locations.} \item{scale}{Vector of scales.} \item{shape}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the Frechet distribution with location \code{loc}, scale \code{scale}, and shape \code{shape}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/add_criterion.Rd0000644000175000017500000000461313701270367015231 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{add_criterion} \alias{add_criterion} \alias{add_criterion.brmsfit} \title{Add model fit criteria to model objects} \usage{ add_criterion(x, ...) 
\method{add_criterion}{brmsfit}( x, criterion, model_name = NULL, overwrite = FALSE, file = NULL, force_save = FALSE, ... ) } \arguments{ \item{x}{An \R object typically of class \code{brmsfit}.} \item{...}{Further arguments passed to the underlying functions computing the model fit criteria.} \item{criterion}{Names of model fit criteria to compute. Currently supported are \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"loo_subsample"}, \code{"bayes_R2"} (Bayesian R-squared), \code{"loo_R2"} (LOO-adjusted R-squared), and \code{"marglik"} (log marginal likelihood).} \item{model_name}{Optional name of the model. If \code{NULL} (the default) the name is taken from the call to \code{x}.} \item{overwrite}{Logical; Indicates if already stored fit indices should be overwritten. Defaults to \code{FALSE}.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object including the newly added criterion values is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If \code{x} was already stored in a file before, the file name will be reused automatically (with a message) unless overwritten by \code{file}. In any case, \code{file} only applies if new criteria were actually added via \code{add_criterion} or if \code{force_save} was set to \code{TRUE}.} \item{force_save}{Logical; only relevant if \code{file} is specified and ignored otherwise. If \code{TRUE}, the fitted model object will be saved regardless of whether new criteria were added via \code{add_criterion}.} } \value{ An object of the same class as \code{x}, but with model fit criteria added for later usage. } \description{ Add model fit criteria to model objects } \details{ Functions \code{add_loo} and \code{add_waic} are aliases of \code{add_criterion} with fixed values for the \code{criterion} argument. } \examples{ \dontrun{ fit <- brm(count ~ Trt, data = epilepsy) # add both LOO and WAIC at once fit <- add_criterion(fit, c("loo", "waic")) print(fit$criteria$loo) print(fit$criteria$waic) } } brms/man/model_weights.brmsfit.Rd0000644000175000017500000000403514105230573016712 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{model_weights.brmsfit} \alias{model_weights.brmsfit} \alias{model_weights} \title{Model Weighting Methods} \usage{ \method{model_weights}{brmsfit}(x, ..., weights = "stacking", model_names = NULL) model_weights(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}, For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). 
For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ A numeric vector of weights for the models. } \description{ Compute model weights in various ways, for instance, via stacking of posterior predictive distributions, Akaike weights, or marginal likelihoods. } \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # obtain Akaike weights based on the WAIC model_weights(fit1, fit2, weights = "waic") } } brms/man/gp.Rd0000644000175000017500000001244514111751667013036 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-gp.R \name{gp} \alias{gp} \title{Set up Gaussian process terms in \pkg{brms}} \usage{ gp( ..., by = NA, k = NA, cov = "exp_quad", iso = TRUE, gr = TRUE, cmc = TRUE, scale = TRUE, c = NULL ) } \arguments{ \item{...}{One or more predictors for the GP.} \item{by}{A numeric or factor variable of the same length as each predictor. In the numeric vector case, the elements multiply the values returned by the GP. In the factor variable case, a separate GP is fitted for each factor level.} \item{k}{Optional number of basis functions for computing approximate GPs. If \code{NA} (the default), exact GPs are computed.} \item{cov}{Name of the covariance kernel. By default, the exponentiated-quadratic kernel \code{"exp_quad"} is used.} \item{iso}{A flag to indicate whether an isotropic (\code{TRUE}; the default) or a non-isotropic GP should be used. In the former case, the same amount of smoothing is applied to all predictors. In the latter case, predictors may have different smoothing. Ignored if only a single predictor is supplied.} \item{gr}{Logical; Indicates if auto-grouping should be used (defaults to \code{TRUE}). If enabled, observations sharing the same predictor values will be represented by the same latent variable in the GP. This will improve sampling efficiency drastically if the number of unique predictor combinations is small relative to the number of observations.} \item{cmc}{Logical; Only relevant if \code{by} is a factor. If \code{TRUE} (the default), cell-mean coding is used for the \code{by}-factor, that is, one GP per level is estimated. If \code{FALSE}, contrast GPs are estimated according to the contrasts set for the \code{by}-factor.} \item{scale}{Logical; If \code{TRUE} (the default), predictors are scaled so that the maximum Euclidean distance between two points is 1. This often improves sampling speed and convergence. Scaling also affects the estimated length-scale parameters in that they resemble those of scaled predictors (not of the original predictors) if \code{scale} is \code{TRUE}.} \item{c}{Numeric value only used in approximate GPs. Defines the multiplicative constant of the predictors' range over which predictions should be computed. A good default could be \code{c = 5/4} but we are still working on providing better recommendations.} } \value{ An object of class \code{'gp_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a Gaussian process (GP) term in \pkg{brms}. 
The function does not evaluate its arguments -- it exists purely to help set up a model with GP terms. } \details{ A GP is a stochastic process, which describes the relation between one or more predictors \eqn{x = (x_1, ..., x_d)} and a response \eqn{f(x)}, where \eqn{d} is the number of predictors. A GP is the generalization of the multivariate normal distribution to an infinite number of dimensions. Thus, it can be interpreted as a prior over functions. Any finite sample realized from this stochastic process is jointly multivariate normal, with a covariance matrix defined by the covariance kernel \eqn{k_p(x)}, where \eqn{p} is the vector of parameters of the GP: \deqn{f(x) ~ MVN(0, k_p(x))} The smoothness and general behavior of the function \eqn{f} depends only on the choice of covariance kernel. For a more detailed introduction to Gaussian processes, see \url{https://en.wikipedia.org/wiki/Gaussian_process}. Below, we describe the currently supported covariance kernels: \itemize{ \item{"exp_quad": }{The exponentiated-quadratic kernel is defined as \eqn{k(x_i, x_j) = sdgp^2 exp(- || x_i - x_j ||^2 / (2 lscale^2))}, where \eqn{|| . ||} is the Euclidean norm, \eqn{sdgp} is a standard deviation parameter, and \eqn{lscale} is characteristic length-scale parameter. The latter practically measures how close two points \eqn{x_i} and \eqn{x_j} have to be to influence each other substantially.} } In the current implementation, \code{"exp_quad"} is the only supported covariance kernel. More options will follow in the future. } \examples{ \dontrun{ # simulate data using the mgcv package dat <- mgcv::gamSim(1, n = 30, scale = 2) # fit a simple GP model fit1 <- brm(y ~ gp(x2), dat, chains = 2) summary(fit1) me1 <- conditional_effects(fit1, ndraws = 200, spaghetti = TRUE) plot(me1, ask = FALSE, points = TRUE) # fit a more complicated GP model fit2 <- brm(y ~ gp(x0) + x1 + gp(x2) + x3, dat, chains = 2) summary(fit2) me2 <- conditional_effects(fit2, ndraws = 200, spaghetti = TRUE) plot(me2, ask = FALSE, points = TRUE) # fit a multivariate GP model fit3 <- brm(y ~ gp(x1, x2), dat, chains = 2) summary(fit3) me3 <- conditional_effects(fit3, ndraws = 200, spaghetti = TRUE) plot(me3, ask = FALSE, points = TRUE) # compare model fit LOO(fit1, fit2, fit3) # simulate data with a factor covariate dat2 <- mgcv::gamSim(4, n = 90, scale = 2) # fit separate gaussian processes for different levels of 'fac' fit4 <- brm(y ~ gp(x2, by = fac), dat2, chains = 2) summary(fit4) plot(conditional_effects(fit4), points = TRUE) } } \seealso{ \code{\link{brmsformula}} } brms/man/validate_prior.Rd0000644000175000017500000000662714111751667015441 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{validate_prior} \alias{validate_prior} \title{Validate Prior for \pkg{brms} Models} \usage{ validate_prior( prior, formula, data, family = gaussian(), sample_prior = "no", data2 = NULL, knots = NULL, ... ) } \arguments{ \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. 
The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{...}{Other arguments for internal usage only.} } \value{ An object of class \code{brmsprior}. } \description{ Validate priors supplied by the user. Return a complete set of priors for the given model, including default priors. } \examples{ prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) validate_prior(prior1, count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) } \seealso{ \code{\link{get_prior}}, \code{\link{set_prior}}. 
} brms/man/brm.Rd0000644000175000017500000005565214111751667013217 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm.R \name{brm} \alias{brm} \title{Fit Bayesian Generalized (Non-)Linear Multivariate Multilevel Models} \usage{ brm( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", sparse = NULL, knots = NULL, stanvars = NULL, stan_funs = NULL, fit = NA, save_pars = NULL, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL, inits = "random", chains = 4, iter = 2000, warmup = floor(iter/2), thin = 1, cores = getOption("mc.cores", 1), threads = NULL, opencl = NULL, normalize = getOption("brms.normalize", TRUE), control = NULL, algorithm = getOption("brms.algorithm", "sampling"), backend = getOption("brms.backend", "rstan"), future = getOption("future", FALSE), silent = 1, seed = NA, save_model = NULL, stan_model_args = list(), file = NULL, file_refit = getOption("brms.file_refit", "never"), empty = FALSE, rename = TRUE, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to that classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others to model pedigrees and phylogenetic effects. 
It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sample_prior}{Indicate if draws from priors should be drawn additionally to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which allows among others to generate draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{fit}{An instance of S3 class \code{brmsfit} derived from a previous fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit}, the compiled model associated with the fitted result is re-used and all arguments modifying the model code or data are ignored. It is not recommended to use this argument directly, but to call the \code{\link[brms:update.brmsfit]{update}} method, instead.} \item{save_pars}{An object generated by \code{\link{save_pars}} controlling which parameters should be saved in the model. The argument has no impact on the model fitting itself.} \item{save_ranef}{(Deprecated) A flag to indicate if group-level effects for each level of the grouping factor(s) should be saved (default is \code{TRUE}). Set to \code{FALSE} to save memory. The argument has no impact on the model fitting itself.} \item{save_mevars}{(Deprecated) A flag to indicate if draws of latent noise-free variables obtained by using \code{me} and \code{mi} terms should be saved (default is \code{FALSE}). Saving these draws allows to better use methods such as \code{predict} with the latent variables but leads to very large \R objects even for models of moderate size and complexity.} \item{save_all_pars}{(Deprecated) A flag to indicate if draws from all variables defined in Stan's \code{parameters} block should be saved (default is \code{FALSE}). 
Saving these draws is required in order to apply the methods \code{bridge_sampler}, \code{bayes_factor}, and \code{post_prob}.} \item{inits}{Either \code{"random"} or \code{"0"}. If inits is \code{"random"} (the default), Stan will randomly generate initial values for parameters. If it is \code{"0"}, all parameters are initialized to zero. This option is sometimes useful for certain families, as it happens that default (\code{"random"}) inits cause draws to be essentially constant. Generally, setting \code{inits = "0"} is worth a try, if chains do not behave well. Alternatively, \code{inits} can be a list of lists containing the initial values, or a function (or function name) generating initial values. The latter options are mainly implemented for internal testing but are available to users if necessary. If specifying initial values using a list or a function then currently the parameter names must correspond to the names used in the generated Stan code (not the names used in \R). For more details on specifying initial values you can consult the documentation of the selected \code{backend}.} \item{chains}{Number of Markov chains (defaults to 4).} \item{iter}{Number of total iterations per chain (including warmup; defaults to 2000).} \item{warmup}{A positive integer specifying number of warmup (aka burnin) iterations. This also specifies the number of iterations used for stepsize adaptation, so warmup draws should not be used for inference. The number of warmup should not be larger than \code{iter} and the default is \code{iter/2}.} \item{thin}{Thinning rate. Must be a positive integer. Set \code{thin > 1} to save memory and computation time if \code{iter} is large.} \item{cores}{Number of cores to use when executing the chains in parallel, which defaults to 1 but we recommend setting the \code{mc.cores} option to be as many processors as the hardware and RAM allow (up to the number of chains). For non-Windows OS in non-interactive \R sessions, forking is used instead of PSOCK clusters.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow running model that cannot be sped up by any other means.} \item{opencl}{The platform and device IDs of the OpenCL device to use for fitting using GPU support. If you don't know the IDs of your OpenCL device, \code{c(0,0)} is most likely what you need. For more details, see \code{\link{opencl}}.} \item{normalize}{Logical. Indicates whether normalization constants should be included in the Stan code (defaults to \code{TRUE}). Setting it to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, sampling efficiency may be increased but some post processing functions such as \code{\link{bridge_sampler}} will not be available. Can be controlled globally for the current \R session via the `brms.normalize` option.} \item{control}{A named \code{list} of parameters to control the sampler's behavior. It defaults to \code{NULL} so all the default values are used. The most important control parameters are discussed in the 'Details' section below. For a comprehensive overview see \code{\link[rstan:stan]{stan}}.} \item{algorithm}{Character string naming the estimation approach to use. 
Options are \code{"sampling"} for MCMC (the default), \code{"meanfield"} for variational inference with independent normal distributions, \code{"fullrank"} for variational inference with a multivariate normal distribution, or \code{"fixed_param"} for sampling from fixed parameter values. Can be set globally for the current \R session via the \code{"brms.algorithm"} option (see \code{\link{options}}).} \item{backend}{Character string naming the package to use as the backend for fitting the Stan model. Options are \code{"rstan"} (the default) or \code{"cmdstanr"}. Can be set globally for the current \R session via the \code{"brms.backend"} option (see \code{\link{options}}). Details on the \pkg{rstan} and \pkg{cmdstanr} packages are available at \url{https://mc-stan.org/rstan/} and \url{https://mc-stan.org/cmdstanr/}, respectively. Additionally a \code{"mock"} backend is available to make testing \pkg{brms} and packages that depend on it easier. The \code{"mock"} backend does not actually do any fitting, it only checks the generated Stan code for correctness and then returns whatever is passed in an additional \code{mock_fit} argument as the result of the fit.} \item{future}{Logical; If \code{TRUE}, the \pkg{\link[future:future]{future}} package is used for parallel execution of the chains and argument \code{cores} will be ignored. Can be set globally for the current \R session via the \code{"future"} option. The execution type is controlled via \code{\link[future:plan]{plan}} (see the examples section below).} \item{silent}{Verbosity level between \code{0} and \code{2}. If \code{1} (the default), most of the informational messages of compiler and sampler are suppressed. If \code{2}, even more messages are suppressed. The actual sampling progress is still printed. Set \code{refresh = 0} to turn this off as well. If using \code{backend = "rstan"} you can also set \code{open_progress = FALSE} to prevent opening additional progress bars.} \item{seed}{The seed for random number generation to make results reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed randomly.} \item{save_model}{Either \code{NULL} or a character string. In the latter case, the model's Stan code is saved via \code{\link{cat}} in a text file named after the string supplied in \code{save_model}.} \item{stan_model_args}{A \code{list} of further arguments passed to \code{\link[rstan:stan_model]{stan_model}}.} \item{file}{Either \code{NULL} or a character string. In the latter case, the fitted model object is saved via \code{\link{saveRDS}} in a file named after the string supplied in \code{file}. The \code{.rds} extension is added automatically. If the file already exists, \code{brm} will load and return the saved model object instead of refitting the model. Unless you specify the \code{file_refit} argument as well, the existing files won't be overwritten, you have to manually remove the file in order to refit and save the model under an existing file name. The file name is stored in the \code{brmsfit} object for later usage.} \item{file_refit}{Modifies when the fit stored via the \code{file} parameter is re-used. Can be set globally for the current \R session via the \code{"brms.file_refit"} option (see \code{\link{options}}). For \code{"never"} (default) the fit is always loaded if it exists and fitting is skipped. For \code{"always"} the model is always refitted. If set to \code{"on_change"}, brms will refit the model if model, data or algorithm as passed to Stan differ from what is stored in the file. 
This also covers changes in priors, \code{sample_prior}, \code{stanvars}, covariance structure, etc. If you believe there was a false positive, you can use \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. Refit will not be triggered for changes in additional parameters of the fit (e.g., initial values, number of iterations, control arguments, ...). A known limitation is that a refit will be triggered if within-chain parallelization is switched on/off.} \item{empty}{Logical. If \code{TRUE}, the Stan model is not created and compiled and the corresponding \code{'fit'} slot of the \code{brmsfit} object will be empty. This is useful if you have estimated a brms-created Stan model outside of \pkg{brms} and want to feed it back into the package.} \item{rename}{For internal use only.} \item{...}{Further arguments passed to Stan. For \code{backend = "rstan"} the arguments are passed to \code{\link[rstan]{sampling}} or \code{\link[rstan]{vb}}. For \code{backend = "cmdstanr"} the arguments are passed to the \code{cmdstanr::sample} or \code{cmdstanr::variational} method.} } \value{ An object of class \code{brmsfit}, which contains the posterior draws along with many other useful information about the model. Use \code{methods(class = "brmsfit")} for an overview on available methods. } \description{ Fit Bayesian generalized (non-)linear multivariate multilevel models using Stan for full Bayesian inference. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, count data, survival, response times, ordinal, zero-inflated, hurdle, and even self-defined mixture models all in a multilevel context. Further modeling options include non-linear and smooth terms, auto-correlation structures, censored data, meta-analytic standard errors, and quite a few more. In addition, all parameters of the response distributions can be predicted in order to perform distributional regression. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared with posterior predictive checks and leave-one-out cross-validation. } \details{ Fit a generalized (non-)linear multivariate multilevel model via full Bayesian inference using Stan. A general overview is provided in the vignettes \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")}. For a full list of available vignettes see \code{vignette(package = "brms")}. \bold{Formula syntax of brms models} Details of the formula syntax applied in \pkg{brms} can be found in \code{\link{brmsformula}}. \bold{Families and link functions} Details of families supported by \pkg{brms} can be found in \code{\link{brmsfamily}}. \bold{Prior distributions} Priors should be specified using the \code{\link[brms:set_prior]{set_prior}} function. Its documentation contains detailed information on how to correctly specify priors. To find out on which parameters or parameter classes priors can be defined, use \code{\link[brms:get_prior]{get_prior}}. Default priors are chosen to be non or very weakly informative so that their influence on the results will be negligible and you usually don't have to worry about them. 
However, after getting more familiar with Bayesian statistics, I recommend that you start thinking about reasonable informative priors for your model parameters: Nearly always, there is at least some prior information available that can be used to improve your inference. \bold{Adjusting the sampling behavior of \pkg{Stan}} In addition to choosing the number of iterations, warmup draws, and chains, users can control the behavior of the NUTS sampler by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or at best eliminate) the number of divergent transitions that cause a bias in the obtained posterior draws. Whenever you see the warning "There were x divergent transitions after warmup." you should really think about increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (current default) and \code{1}. Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior draws. Another problem arises when the depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior draws. When it happens, \pkg{Stan} will throw a warning suggesting that you increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = <x>)} with a positive integer \code{<x>} that should usually be larger than the current default of \code{10}. For more details on the \code{control} argument see \code{\link[rstan:stan]{stan}}. } \examples{ \dontrun{ # Poisson regression for the number of seizures in epileptic patients # using normal priors for population-level effects # and half-cauchy priors for standard deviations of group-level effects prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), prior = prior1) # generate a summary of the results summary(fit1) # plot the MCMC chains as well as the posterior distributions plot(fit1, ask = FALSE) # predict responses based on the fitted model head(predict(fit1)) # plot conditional effects for each predictor plot(conditional_effects(fit1), ask = FALSE) # investigate model fit loo(fit1) pp_check(fit1) # Ordinal regression modeling patient's rating of inhaler instructions # category specific effects are estimated for variable 'treat' fit2 <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("logit"), prior = set_prior("normal(0,5)"), chains = 2) summary(fit2) plot(fit2, ask = FALSE) WAIC(fit2) # Survival regression modeling the time between the first # and second recurrence of an infection in kidney patients.
fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit3) plot(fit3, ask = FALSE) plot(conditional_effects(fit3), ask = FALSE) # Probit regression using the binomial family ntrials <- sample(1:10, 100, TRUE) success <- rbinom(100, size = ntrials, prob = 0.4) x <- rnorm(100) data4 <- data.frame(ntrials, success, x) fit4 <- brm(success | trials(ntrials) ~ x, data = data4, family = binomial("probit")) summary(fit4) # Non-linear Gaussian model fit5 <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) summary(fit5) conditional_effects(fit5) # Normal model with heterogeneous variances data_het <- data.frame( y = c(rnorm(50), rnorm(50, 1, 2)), x = factor(rep(c("a", "b"), each = 50)) ) fit6 <- brm(bf(y ~ x, sigma ~ 0 + x), data = data_het) summary(fit6) plot(fit6) conditional_effects(fit6) # extract estimated residual SDs of both groups sigmas <- exp(as.data.frame(fit6, variable = "^b_sigma_", regex = TRUE)) ggplot(stack(sigmas), aes(values)) + geom_density(aes(fill = ind)) # Quantile regression predicting the 25\%-quantile fit7 <- brm(bf(y ~ x, quantile = 0.25), data = data_het, family = asym_laplace()) summary(fit7) conditional_effects(fit7) # use the future package for more flexible parallelization library(future) plan(multiprocess) fit7 <- update(fit7, future = TRUE) # fit a model manually via rstan scode <- make_stancode(count ~ Trt, data = epilepsy) sdata <- make_standata(count ~ Trt, data = epilepsy) stanfit <- rstan::stan(model_code = scode, data = sdata) # feed the Stan model back into brms fit8 <- brm(count ~ Trt, data = epilepsy, empty = TRUE) fit8$fit <- stanfit fit8 <- rename_pars(fit8) summary(fit8) } } \references{ Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. \code{doi:10.18637/jss.v080.i01} Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. \emph{The R Journal}. 10(1), 395–411. \code{doi:10.32614/RJ-2018-017} } \seealso{ \code{\link{brms}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}}, \code{\link{brmsfit}} } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/summary.brmsfit.Rd0000644000175000017500000000314314111751667015565 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{summary.brmsfit} \alias{summary.brmsfit} \title{Create a summary of a fitted model represented by a \code{brmsfit} object} \usage{ \method{summary}{brmsfit}( object, priors = FALSE, prob = 0.95, robust = FALSE, mc_se = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{priors}{Logical; Indicating if priors should be included in the summary. Default is \code{FALSE}.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. 
If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{mc_se}{Logical; Indicating if the uncertainty in \code{Estimate} caused by the MCMC sampling should be shown in the summary. Defaults to \code{FALSE}.} \item{...}{Other potential arguments} } \description{ Create a summary of a fitted model represented by a \code{brmsfit} object } \details{ The convergence diagnostics \code{Rhat}, \code{Bulk_ESS}, and \code{Tail_ESS} are described in detail in Vehtari et al. (2020). } \references{ Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, and Paul-Christian Bürkner (2020). Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC. \emph{Bayesian Analysis}. 1–28. doi:10.1214/20-BA1221 } brms/man/prior_draws.brmsfit.Rd0000644000175000017500000000364314111751667016430 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prior_draws.R \name{prior_draws.brmsfit} \alias{prior_draws.brmsfit} \alias{prior_samples} \alias{prior_draws} \title{Extract Prior Draws} \usage{ \method{prior_draws}{brmsfit}(x, variable = NULL, pars = NULL, ...) prior_draws(x, ...) prior_samples(x, ...) } \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{...}{Arguments passed to individual methods (if applicable).} } \value{ A \code{data.frame} containing the prior draws. } \description{ Extract prior draws of specified parameters } \details{ To make use of this function, the model must contain draws of prior distributions. This can be ensured by setting \code{sample_prior = TRUE} in function \code{brm}. Priors of certain parameters cannot be saved for technical reasons. For instance, this is the case for the population-level intercept, which is only computed after fitting the model by default. If you want to treat the intercept as part of all the other regression coefficients, so that sampling from its prior becomes possible, use \code{... ~ 0 + Intercept + ...} in the formulas. } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative", prior = set_prior("normal(0,2)", class = "b"), sample_prior = TRUE) # extract all prior draws draws1 <- prior_draws(fit) head(draws1) # extract prior draws for the coefficient of 'treat' draws2 <- prior_draws(fit, "b_treat") head(draws2) } } brms/man/theme_black.Rd0000644000175000017500000000240414111751667014660 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot-themes.R \name{theme_black} \alias{theme_black} \title{(Deprecated) Black Theme for \pkg{ggplot2} Graphics} \usage{ theme_black(base_size = 12, base_family = "") } \arguments{ \item{base_size}{base font size} \item{base_family}{base font family} } \value{ A \code{theme} object used in \pkg{ggplot2} graphics. } \description{ A black theme for ggplot graphics inspired by a blog post of Jon Lefcheck (\url{https://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/}).
} \details{ When using \code{theme_black} in plots powered by the \pkg{bayesplot} package such as \code{pp_check} or \code{stanplot}, I recommend using the \code{"viridisC"} color scheme (see examples). } \examples{ \dontrun{ # change default ggplot theme ggplot2::theme_set(theme_black()) # change default bayesplot color scheme bayesplot::color_scheme_set("viridisC") # fit a simple model fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 2) summary(fit) # create various plots plot(marginal_effects(fit), ask = FALSE) pp_check(fit) mcmc_plot(fit, type = "hex", variable = c("b_Intercept", "b_Trt1")) } } brms/man/make_stancode.Rd0000644000175000017500000001513014111751667015217 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_stancode.R \name{make_stancode} \alias{make_stancode} \title{Stan Code for \pkg{brms} Models} \usage{ make_stancode( formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sparse = NULL, sample_prior = "no", stanvars = NULL, stan_funs = NULL, knots = NULL, threads = NULL, normalize = getOption("brms.normalize", TRUE), save_model = NULL, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to one of those classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing users to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{prior}{One or more \code{brmsprior} objects created by \code{\link{set_prior}} or related functions and combined using the \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} for more help.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommended to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{cov_ranef}{(Deprecated) A list of matrices that are proportional to the (within) covariance structure of the group-level effects. The names of the matrices should correspond to columns in \code{data} that are used as grouping factors.
All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among other things, to model pedigrees and phylogenetic effects. It is now recommended to specify those matrices in the formula interface using the \code{\link{gr}} and related functions. See \code{vignette("brms_phylogenetics")} for more details.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved and may even be slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{sample_prior}{Indicates if draws from priors should be drawn in addition to the posterior draws. Options are \code{"no"} (the default), \code{"yes"}, and \code{"only"}. Among others, these draws can be used to calculate Bayes factors for point hypotheses via \code{\link{hypothesis}}. Please note that improper priors are not sampled, including the default improper priors used by \code{brm}. See \code{\link{set_prior}} on how to set (proper) priors. Please also note that prior draws for the overall intercept are not obtained by default for technical reasons. See \code{\link{brmsformula}} on how to obtain prior draws for the intercept. If \code{sample_prior} is set to \code{"only"}, draws are drawn solely from the priors ignoring the likelihood, which, among other things, allows generating draws from the prior predictive distribution. In this case, all parameters must have proper priors.} \item{stanvars}{An optional \code{stanvars} object generated by function \code{\link{stanvar}} to define additional variables for use in \pkg{Stan}'s program blocks.} \item{stan_funs}{(Deprecated) An optional character string containing self-defined \pkg{Stan} functions, which will be included in the functions block of the generated \pkg{Stan} code. It is now recommended to use the \code{stanvars} argument for this purpose instead.} \item{knots}{Optional list containing user-specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{threads}{Number of threads to use in within-chain parallelization. For more control over the threading process, \code{threads} may also be a \code{brmsthreads} object created by \code{\link{threading}}. Within-chain parallelization is experimental! We recommend its use only if you are experienced with Stan's \code{reduce_sum} function and have a slow-running model that cannot be sped up by any other means.} \item{normalize}{Logical. Indicates whether normalization constants should be included in the Stan code (defaults to \code{TRUE}). Setting it to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, sampling efficiency may be increased but some post-processing functions such as \code{\link{bridge_sampler}} will not be available. Can be controlled globally for the current \R session via the \code{"brms.normalize"} option.} \item{save_model}{Either \code{NULL} or a character string. In the latter case, the model's Stan code is saved via \code{\link{cat}} in a text file named after the string supplied in \code{save_model}.} \item{...}{Other arguments for internal usage only.} } \value{ A character string containing the fully commented \pkg{Stan} code to fit a \pkg{brms} model.
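For instance, the returned string can be inspected or written to a file (a minimal sketch; the variable name \code{scode} and the file name \code{"model.stan"} are arbitrary choices for illustration):
\preformatted{
scode <- make_stancode(count ~ zAge + zBase * Trt + (1|patient),
                       data = epilepsy, family = poisson())
# print the generated Stan code to the console
cat(scode)
# or save it to a file for manual inspection or compilation
cat(scode, file = "model.stan")
}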
} \description{ Generate Stan code for \pkg{brms} models } \examples{ make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") make_stancode(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") } brms/man/reloo.brmsfit.Rd0000644000175000017500000000557214010776135015213 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reloo.R \name{reloo.brmsfit} \alias{reloo.brmsfit} \alias{reloo.loo} \alias{reloo} \title{Compute exact cross-validation for problematic observations} \usage{ \method{reloo}{brmsfit}( x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, ... ) \method{reloo}{loo}(x, fit, ...) reloo(x, ...) } \arguments{ \item{x}{An \R object of class \code{brmsfit} or \code{loo} depending on the method.} \item{loo}{An \R object of class \code{loo}.} \item{k_threshold}{The threshold at which Pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check}{Logical; If \code{TRUE} (the default), some checks are performed to verify that the \code{loo} object was generated from the \code{brmsfit} object passed to argument \code{fit}.} \item{...}{Further arguments passed to \code{\link{update.brmsfit}} and \code{\link{log_lik.brmsfit}}.} \item{fit}{An \R object of class \code{brmsfit}.} } \value{ An object of the class \code{loo}. } \description{ Compute exact cross-validation for problematic observations for which approximate leave-one-out cross-validation may return incorrect results. Models for problematic observations can be run in parallel using the \pkg{future} package. } \details{ Warnings about Pareto \eqn{k} estimates indicate observations for which the approximation to LOO is problematic (this is described in detail in Vehtari, Gelman, and Gabry (2017) and the \pkg{\link[loo:loo-package]{loo}} package documentation). If there are \eqn{J} observations with \eqn{k} estimates above \code{k_threshold}, then \code{reloo} will refit the original model \eqn{J} times, each time leaving out one of the \eqn{J} problematic observations. The pointwise contributions of these observations to the total ELPD are then computed directly and substituted for the previous estimates from these \eqn{J} observations that are stored in the original \code{loo} object.
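For instance, before calling \code{reloo}, one might inspect which observations exceed the threshold (a minimal sketch; the object names \code{fit1} and \code{loo1} mirror the examples below and are illustrative assumptions, not part of the API):
\preformatted{
loo1 <- loo(fit1)
# indices of observations whose Pareto k estimate exceeds the threshold
loo::pareto_k_ids(loo1, threshold = 0.7)
# refit the model once per problematic observation
reloo1 <- reloo(fit1, loo = loo1)
}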
} \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # throws warning about some pareto k estimates being too high (loo1 <- loo(fit1)) (reloo1 <- reloo(fit1, loo = loo1, chains = 1)) } } \seealso{ \code{\link{loo}}, \code{\link{kfold}} } brms/man/cor_brms.Rd0000644000175000017500000000215713701270367014232 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_brms} \alias{cor_brms} \alias{cor_brms-class} \title{(Deprecated) Correlation structure classes for the \pkg{brms} package} \description{ Classes of correlation structures available in the \pkg{brms} package. \code{cor_brms} is not a correlation structure itself, but the class common to all correlation structures implemented in \pkg{brms}. } \section{Available correlation structures}{ \describe{ \item{cor_arma}{autoregressive-moving average (ARMA) structure, with arbitrary orders for the autoregressive and moving average components} \item{cor_ar}{autoregressive (AR) structure of arbitrary order} \item{cor_ma}{moving average (MA) structure of arbitrary order} \item{cor_car}{Spatial conditional autoregressive (CAR) structure} \item{cor_sar}{Spatial simultaneous autoregressive (SAR) structure} \item{cor_fixed}{fixed user-defined covariance structure} } } \seealso{ \code{\link{cor_arma}, \link{cor_ar}, \link{cor_ma}, \link{cor_car}, \link{cor_sar}, \link{cor_fixed}} } brms/man/kfold_predict.Rd0000644000175000017500000000341313701270367015231 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kfold.R \name{kfold_predict} \alias{kfold_predict} \title{Predictions from K-Fold Cross-Validation} \usage{ kfold_predict(x, method = c("predict", "fitted"), resp = NULL, ...) } \arguments{ \item{x}{Object of class \code{'kfold'} computed by \code{\link{kfold}}. For \code{kfold_predict} to work, the fitted model objects need to have been stored via argument \code{save_fits} of \code{\link{kfold}}.} \item{method}{The method used to make predictions. Either \code{"predict"} or \code{"fitted"}. See \code{\link{predict.brmsfit}} for details.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ A \code{list} with two slots named \code{'y'} and \code{'yrep'}. Slot \code{y} contains the vector of observed responses. Slot \code{yrep} contains the matrix of predicted responses, with rows being posterior draws and columns being observations. } \description{ Compute and evaluate predictions after performing K-fold cross-validation via \code{\link{kfold}}. 
} \examples{ \dontrun{ fit <- brm(count ~ zBase * Trt + (1|patient), data = epilepsy, family = poisson()) # perform k-fold cross validation (kf <- kfold(fit, save_fits = TRUE, chains = 1)) # define a loss function rmse <- function(y, yrep) { yrep_mean <- colMeans(yrep) sqrt(mean((yrep_mean - y)^2)) } # predict responses and evaluate the loss kfp <- kfold_predict(kf) rmse(y = kfp$y, yrep = kfp$yrep) } } \seealso{ \code{\link{kfold}} } brms/man/cor_ar.Rd0000644000175000017500000000404713701270367013671 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_ar} \alias{cor_ar} \title{(Deprecated) AR(p) correlation structure} \usage{ cor_ar(formula = ~1, p = 1, cov = FALSE) } \arguments{ \item{formula}{A one-sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer-valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is 1.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma} containing solely autoregression terms. } \description{ This function is deprecated. Please see \code{\link{ar}} for the new syntax. This function is a constructor for the \code{cor_arma} class, allowing for autoregression terms only. } \details{ AR refers to autoregressive effects of residuals, which is what is typically understood as autoregressive effects. However, one may also model autoregressive effects of the response variable, which is called ARR in \pkg{brms}. } \examples{ cor_ar(~visit|patient, p = 2) } \seealso{ \code{\link{cor_arma}} } brms/man/mm.Rd0000644000175000017500000000566314105230573013034 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{mm} \alias{mm} \title{Set up multi-membership grouping terms in \pkg{brms}} \usage{ mm( ..., weights = NULL, scale = TRUE, by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian" ) } \arguments{ \item{...}{One or more terms containing grouping factors.} \item{weights}{A matrix specifying the weights of each member. It should have as many columns as grouping terms specified in \code{...}. If \code{NULL} (the default), equal weights are used.} \item{scale}{Logical; if \code{TRUE} (the default), weights are standardized in order to sum to one per row. If negative weights are specified, \code{scale} needs to be set to \code{FALSE}.} \item{by}{An optional factor matrix, specifying sub-populations of the groups. It should have as many columns as grouping terms specified in \code{...}.
For each level of the \code{by} variable, a separate variance-covariance matrix will be fitted. Levels of the grouping factor must be nested in levels of the \code{by} variable matrix.} \item{cor}{Logical. If \code{TRUE} (the default), group-level terms will be modeled as correlated.} \item{id}{Optional character string. All group-level terms across the model with the same \code{id} will be modeled as correlated (if \code{cor} is \code{TRUE}). See \code{\link{brmsformula}} for more details.} \item{cov}{An optional matrix which is proportional to the within-group covariance matrix of the group-level effects. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others, to model pedigrees and phylogenetic effects. See \code{vignette("brms_phylogenetics")} for more details. By default, levels of the same grouping factor are modeled as independent of each other.} \item{dist}{Name of the distribution of the group-level effects. Currently \code{"gaussian"} is the only option.} } \description{ Function to set up a multi-membership grouping term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with grouping terms. } \examples{ \dontrun{ # simulate some data dat <- data.frame( y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) ) # multi-membership model with two members per group and equal weights fit1 <- brm(y ~ x1 + (1|mm(g1, g2)), data = dat) summary(fit1) # weight the first member twice as much as the second member dat$w1 <- rep(2, 100) dat$w2 <- rep(1, 100) fit2 <- brm(y ~ x1 + (1|mm(g1, g2, weights = cbind(w1, w2))), data = dat) summary(fit2) # multi-membership model with level-specific covariate values dat$xc <- (dat$x1 + dat$x2) / 2 fit3 <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) summary(fit3) } } \seealso{ \code{\link{brmsformula}}, \code{\link{mmc}} } brms/man/brmsterms.Rd0000644000175000017500000000414713701270370014435 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{brmsterms} \alias{brmsterms} \alias{parse_bf} \alias{brmsterms.default} \alias{brmsterms.brmsformula} \alias{brmsterms.mvbrmsformula} \title{Parse Formulas of \pkg{brms} Models} \usage{ brmsterms(formula, ...) \method{brmsterms}{default}(formula, ...) \method{brmsterms}{brmsformula}(formula, check_response = TRUE, resp_rhs_all = TRUE, ...) \method{brmsterms}{mvbrmsformula}(formula, ...) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to one of those classes): A symbolic description of the model to be fitted. The details of model specification are explained in \code{\link{brmsformula}}.} \item{...}{Further arguments passed to or from other methods.} \item{check_response}{Logical; Indicates whether the left-hand side of \code{formula} (i.e. response variables and addition arguments) should be parsed.
If \code{FALSE}, \code{formula} may also be one-sided.} \item{resp_rhs_all}{Logical; Indicates whether to also include response variables on the right-hand side of the formula \code{.$allvars}, where \code{.} represents the output of \code{brmsterms}.} } \value{ An object of class \code{brmsterms} or \code{mvbrmsterms} (for multivariate models), which is a \code{list} containing all required information initially stored in \code{formula} in an easier-to-use format, basically a list of formulas (not an abstract syntax tree). } \description{ Parse formula objects for use in \pkg{brms}. } \details{ This is the main formula parsing function of \pkg{brms}. It should usually not be called directly, but is exported to allow package developers to make use of the formula syntax implemented in \pkg{brms}. As long as no other packages depend on this function, it may be changed without deprecation warnings when new features make this necessary. } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/plot.brmsfit.Rd0000644000175000017500000000513414111751667015050 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot.brmsfit} \alias{plot.brmsfit} \title{Trace and Density Plots for MCMC Draws} \usage{ \method{plot}{brmsfit}( x, pars = NA, combo = c("dens", "trace"), N = 5, variable = NULL, regex = FALSE, fixed = FALSE, theme = NULL, plot = TRUE, ask = TRUE, newpage = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{combo}{A character vector with at least two elements. Each element of \code{combo} corresponds to a column in the resulting graphic and should be the name of one of the available \code{\link[bayesplot:MCMC-overview]{MCMC}} functions (omitting the \code{mcmc_} prefix).} \item{N}{The number of parameters plotted per page.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{newpage}{Logical; indicates if the first set of plots should be plotted to a new page. Only used if \code{plot} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[bayesplot:MCMC-combos]{mcmc_combo}}.} } \value{ An invisible list of \code{\link[gtable:gtable]{gtable}} objects.
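For instance, the plots can be created without drawing them and rendered later (a minimal sketch; \code{fit} stands for a fitted \code{brmsfit} object as in the examples below):
\preformatted{
figs <- plot(fit, variable = "^b_", regex = TRUE, plot = FALSE)
# figs is a list of gtable objects; draw the first page manually
grid::grid.draw(figs[[1]])
}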
} \description{ Trace and Density Plots for MCMC Draws } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|visit), data = epilepsy, family = "poisson") plot(fit) ## plot population-level effects only plot(fit, variable = "^b_", regex = TRUE) } } brms/man/autocor.brmsfit.Rd0000644000175000017500000000136213701270367015542 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{autocor.brmsfit} \alias{autocor.brmsfit} \alias{autocor} \title{(Deprecated) Extract Autocorrelation Objects} \usage{ \method{autocor}{brmsfit}(object, resp = NULL, ...) autocor(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Currently unused.} } \value{ A \code{cor_brms} object or a list of such objects for multivariate models. Not supported for models fitted with brms 2.11.1 or higher. } \description{ (Deprecated) Extract Autocorrelation Objects } brms/man/lasso.Rd0000644000175000017500000000355113565500270013541 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{lasso} \alias{lasso} \title{Set up a lasso prior in \pkg{brms}} \usage{ lasso(df = 1, scale = 1) } \arguments{ \item{df}{Degrees of freedom of the chi-square prior of the inverse tuning parameter. Defaults to \code{1}.} \item{scale}{Scale of the lasso prior. Defaults to \code{1}.} } \value{ A character string obtained by \code{match.call()} with additional arguments. } \description{ Function used to set up a lasso prior for population-level effects in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up the model. } \details{ The lasso prior is the Bayesian equivalent to the LASSO method for performing variable selection (Park & Casella, 2008). With this prior, independent Laplace (i.e. double exponential) priors are placed on the population-level effects. The scale of the Laplace priors depends on a tuning parameter that controls the amount of shrinkage. In \pkg{brms}, the inverse of the tuning parameter is used so that smaller values imply more shrinkage. The inverse tuning parameter has a chi-square distribution with degrees of freedom controlled via argument \code{df} of function \code{lasso} (defaults to \code{1}). For instance, one can specify a lasso prior using \code{set_prior("lasso(1)")}. To make sure that shrinkage can equally affect all coefficients, predictors should be on the same scale. If you do not want to standardize all variables, you can adjust the general scale of the lasso prior via argument \code{scale}, for instance, \code{lasso(1, scale = 10)}. } \examples{ set_prior(lasso(df = 1, scale = 10)) } \references{ Park, T., & Casella, G. (2008). The Bayesian Lasso. \emph{Journal of the American Statistical Association}, 103(482), 681-686. } \seealso{ \code{\link{set_prior}} } brms/man/launch_shinystan.brmsfit.Rd0000644000175000017500000000224713701270367017443 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/launch_shinystan.R \name{launch_shinystan.brmsfit} \alias{launch_shinystan.brmsfit} \alias{launch_shinystan} \title{Interface to \pkg{shinystan}} \usage{ \method{launch_shinystan}{brmsfit}(object, rstudio = getOption("shinystan.rstudio"), ...)
} \arguments{ \item{object}{A fitted model object typically of class \code{brmsfit}.} \item{rstudio}{Only relevant for RStudio users. The default (\code{rstudio=FALSE}) is to launch the app in the default web browser rather than RStudio's pop-up Viewer. Users can change the default to \code{TRUE} by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}.} \item{...}{Optional arguments to pass to \code{\link[shiny:runApp]{runApp}}} } \value{ An S4 shinystan object } \description{ Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms} } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "gaussian") launch_shinystan(fit) } } \seealso{ \code{\link[shinystan:launch_shinystan]{launch_shinystan}} } brms/man/restructure.Rd0000644000175000017500000000136114111751667015012 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/restructure.R \name{restructure} \alias{restructure} \title{Restructure Old \code{brmsfit} Objects} \usage{ restructure(x, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{...}{Currently ignored.} } \value{ A \code{brmsfit} object compatible with the latest version of \pkg{brms}. } \description{ Restructure old \code{brmsfit} objects to work with the latest \pkg{brms} version. This function is called internally when applying post-processing methods. However, in order to avoid unnecessary run time caused by the restructuring, I recommend explicitly calling \code{restructure} once per model after updating \pkg{brms}. } brms/man/brmshypothesis.Rd0000644000175000017500000000431214111751667015505 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{brmshypothesis} \alias{brmshypothesis} \alias{print.brmshypothesis} \alias{plot.brmshypothesis} \title{Descriptions of \code{brmshypothesis} Objects} \usage{ \method{print}{brmshypothesis}(x, digits = 2, chars = 20, ...) \method{plot}{brmshypothesis}( x, N = 5, ignore_prior = FALSE, chars = 40, colors = NULL, theme = NULL, ask = TRUE, plot = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{digits}{Minimal number of significant digits, see \code{\link[base:print.default]{print.default}}.} \item{chars}{Maximum number of characters of each hypothesis to print or plot. If \code{NULL}, print the full hypotheses. Defaults to \code{20}.} \item{...}{Currently ignored.} \item{N}{The number of parameters plotted per page.} \item{ignore_prior}{A flag indicating if prior distributions should also be plotted. Only used if priors were specified on the relevant parameters.} \item{colors}{Two values specifying the colors of the posterior and prior density respectively. If \code{NULL} (the default) colors are taken from the current color scheme of the \pkg{bayesplot} package.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} } \description{ A \code{brmshypothesis} object contains posterior draws as well as summary statistics of non-linear hypotheses as returned by \code{\link{hypothesis}}. 
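A sketch of how such an object is typically created, printed, and plotted (assuming a fitted model \code{fit} with a population-level coefficient \code{b_treat}; the names are illustrative only):
\preformatted{
# test the point hypothesis that the effect of 'treat' is zero
h <- hypothesis(fit, "treat = 0")
print(h, digits = 3)
plot(h, N = 2)
}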
} \details{ The two most important elements of a \code{brmshypothesis} object are \code{hypothesis}, which is a data.frame containing the summary estimates of the hypotheses, and \code{samples}, which is a data.frame containing the corresponding posterior draws. } \seealso{ \code{\link{hypothesis}} } brms/man/mi.Rd0000644000175000017500000000402614111751667013031 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sp.R \name{mi} \alias{mi} \title{Predictors with Missing Values in \pkg{brms} Models} \usage{ mi(x, idx = NA) } \arguments{ \item{x}{The variable containing missing values.} \item{idx}{An optional variable containing indices of observations in `x` that are to be used in the model. This is mostly relevant in partially subsetted models (via \code{resp_subset}) but may also have other applications that I haven't thought of.} } \description{ Specify predictor term with missing values in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model. } \details{ For detailed documentation see \code{help(brmsformula)}. } \examples{ \dontrun{ data("nhanes", package = "mice") N <- nrow(nhanes) # simple model with missing data bform1 <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit1 <- brm(bform1, data = nhanes) summary(fit1) plot(conditional_effects(fit1, resp = "bmi"), ask = FALSE) loo(fit1, newdata = na.omit(fit1$data)) # simulate some measurement noise nhanes$se <- rexp(N, 2) # measurement noise can be handled within 'mi' terms # with or without the presence of missing values bform2 <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi(se) ~ age) + set_rescor(FALSE) fit2 <- brm(bform2, data = nhanes) summary(fit2) plot(conditional_effects(fit2, resp = "bmi"), ask = FALSE) # 'mi' terms can also be used when some responses are subsetted nhanes$sub <- TRUE nhanes$sub[1:2] <- FALSE nhanes$id <- 1:N nhanes$idx <- sample(3:N, N, TRUE) # this requires the addition term 'index' being specified # in the subsetted part of the model bform3 <- bf(bmi | mi() ~ age * mi(chl, idx)) + bf(chl | mi(se) + subset(sub) + index(id) ~ age) + set_rescor(FALSE) fit3 <- brm(bform3, data = nhanes) summary(fit3) plot(conditional_effects(fit3, resp = "bmi"), ask = FALSE) } } \seealso{ \code{\link{brmsformula}} } brms/man/Dirichlet.Rd0000644000175000017500000000152114111751667014330 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Dirichlet} \alias{Dirichlet} \alias{ddirichlet} \alias{rdirichlet} \title{The Dirichlet Distribution} \usage{ ddirichlet(x, alpha, log = FALSE) rdirichlet(n, alpha) } \arguments{ \item{x}{Matrix of quantiles. Each row corresponds to one probability vector.} \item{alpha}{Matrix of positive shape parameters. Each row corresponds to one probability vector.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random number generation for the dirichlet distribution with shape parameter vector \code{alpha}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/data_predictor.Rd0000644000175000017500000000077613625767110015417 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-predictor.R \name{data_predictor} \alias{data_predictor} \title{Prepare Predictor Data} \usage{ data_predictor(x, ...) 
} \arguments{ \item{x}{An \R object.} \item{...}{Further arguments passed to or from other methods.} } \value{ A named list of data related to predictor variables. } \description{ Prepare data related to predictor variables in \pkg{brms}. Only exported for use in package development. } \keyword{internal} brms/man/addition-terms.Rd0000644000175000017500000001120214111751667015341 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ad.R \name{addition-terms} \alias{addition-terms} \alias{resp_se} \alias{resp_weights} \alias{resp_trials} \alias{resp_thres} \alias{resp_cat} \alias{resp_dec} \alias{resp_cens} \alias{resp_trunc} \alias{resp_mi} \alias{resp_index} \alias{resp_rate} \alias{resp_subset} \alias{resp_vreal} \alias{resp_vint} \title{Additional Response Information} \usage{ resp_se(x, sigma = FALSE) resp_weights(x, scale = FALSE) resp_trials(x) resp_thres(x, gr = NA) resp_cat(x) resp_dec(x) resp_cens(x, y2 = NA) resp_trunc(lb = -Inf, ub = Inf) resp_mi(sdy = NA) resp_index(x) resp_rate(denom) resp_subset(x) resp_vreal(...) resp_vint(...) } \arguments{ \item{x}{A vector; usually a variable defined in the data. Allowed values depend on the function: \code{resp_se} and \code{resp_weights} require positive numeric values. \code{resp_trials}, \code{resp_thres}, and \code{resp_cat} require positive integers. \code{resp_dec} requires \code{0} and \code{1}, or alternatively \code{'lower'} and \code{'upper'}. \code{resp_subset} requires \code{0} and \code{1}, or alternatively \code{FALSE} and \code{TRUE}. \code{resp_cens} requires \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate left, no, right, or interval censoring. \code{resp_index} does not make any requirements other than the value being unique for each observation.} \item{sigma}{Logical; Indicates whether the residual standard deviation parameter \code{sigma} should be included in addition to the known measurement error. Defaults to \code{FALSE} for backwards compatibility, but setting it to \code{TRUE} is usually the better choice.} \item{scale}{Logical; Indicates whether weights should be scaled so that the average weight equals one. Defaults to \code{FALSE}.} \item{gr}{A vector of grouping indicators.} \item{y2}{A vector specifying the upper bounds in interval censoring. Will be ignored for non-interval censored observations. However, it should NOT be \code{NA} even for non-interval censored observations to avoid accidental exclusion of these observations.} \item{lb}{A numeric vector or single numeric value specifying the lower truncation bound.} \item{ub}{A numeric vector or single numeric value specifying the upper truncation bound.} \item{sdy}{Optional known measurement error of the response treated as standard deviation. If specified, handles measurement error and (completely) missing values at the same time using the plausible-values-technique.} \item{denom}{A vector of positive numeric values specifying the denominator values from which the response rates are computed.} \item{...}{For \code{resp_vreal}, vectors of real values. For \code{resp_vint}, vectors of integer values. In Stan, these variables will be named \code{vreal1}, \code{vreal2}, ..., and \code{vint1}, \code{vint2}, ..., respectively.} } \value{ A list of additional response information to be processed further by \pkg{brms}. 
} \description{ Provide additional information on the response variable in \pkg{brms} models, such as censoring, truncation, or known measurement error. } \details{ These functions are almost solely useful when called in formulas passed to the \pkg{brms} package. Within formulas, the \code{resp_} prefix may be omitted. More information is given in the 'Details' section of \code{\link{brmsformula}}. } \examples{ \dontrun{ ## Random effects meta-analysis nstudies <- 20 true_effects <- rnorm(nstudies, 0.5, 0.2) sei <- runif(nstudies, 0.05, 0.3) outcomes <- rnorm(nstudies, true_effects, sei) data1 <- data.frame(outcomes, sei) fit1 <- brm(outcomes | se(sei, sigma = TRUE) ~ 1, data = data1) summary(fit1) ## Probit regression using the binomial family n <- sample(1:10, 100, TRUE) # number of trials success <- rbinom(100, size = n, prob = 0.4) x <- rnorm(100) data2 <- data.frame(n, success, x) fit2 <- brm(success | trials(n) ~ x, data = data2, family = binomial("probit")) summary(fit2) ## Survival regression modeling the time between the first ## and second recurrence of an infection in kidney patients. fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit3) ## Poisson model with truncated counts fit4 <- brm(count | trunc(ub = 104) ~ zBase * Trt, data = epilepsy, family = poisson()) summary(fit4) } } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}} } brms/man/Wiener.Rd0000644000175000017500000000403614111751667013656 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Wiener} \alias{Wiener} \alias{dwiener} \alias{rwiener} \title{The Wiener Diffusion Model Distribution} \usage{ dwiener( x, alpha, tau, beta, delta, resp = 1, log = FALSE, backend = getOption("wiener_backend", "Rwiener") ) rwiener( n, alpha, tau, beta, delta, types = c("q", "resp"), backend = getOption("wiener_backend", "Rwiener") ) } \arguments{ \item{x}{Vector of quantiles.} \item{alpha}{Boundary separation parameter.} \item{tau}{Non-decision time parameter.} \item{beta}{Bias parameter.} \item{delta}{Drift rate parameter.} \item{resp}{Response: \code{"upper"} or \code{"lower"}. If no character vector, it is coerced to logical where \code{TRUE} indicates \code{"upper"} and \code{FALSE} indicates \code{"lower"}.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{backend}{Name of the package to use as backend for the computations. Either \code{"Rwiener"} (the default) or \code{"rtdists"}. Can be set globally for the current \R session via the \code{"wiener_backend"} option (see \code{\link{options}}).} \item{n}{Number of draws to sample from the distribution.} \item{types}{Which types of responses to return? By default, return both the response times \code{"q"} and the dichotomous responses \code{"resp"}. If either \code{"q"} or \code{"resp"}, return only one of the two types.} } \description{ Density function and random generation for the Wiener diffusion model distribution with boundary separation \code{alpha}, non-decision time \code{tau}, bias \code{beta} and drift rate \code{delta}. } \details{ These are wrappers around functions of the \pkg{RWiener} or \pkg{rtdists} package (depending on the chosen \code{backend}). See \code{vignette("brms_families")} for details on the parameterization. 
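A minimal usage sketch (the parameter values are arbitrary illustrations, and the default \code{"Rwiener"} backend requires the \pkg{RWiener} package to be installed):
\preformatted{
# simulate 5 response times together with dichotomous responses
rwiener(5, alpha = 2, tau = 0.3, beta = 0.5, delta = 0.5)
# density of an upper-boundary response observed at 0.8 seconds
dwiener(0.8, alpha = 2, tau = 0.3, beta = 0.5, delta = 0.5, resp = "upper")
}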
} \seealso{ \code{\link[RWiener:wienerdist]{wienerdist}}, \code{\link[rtdists:Diffusion]{Diffusion}} } brms/man/log_lik.brmsfit.Rd0000644000175000017500000000731414111751667015514 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/log_lik.R \name{log_lik.brmsfit} \alias{log_lik.brmsfit} \alias{log_lik} \alias{logLik.brmsfit} \title{Compute the Pointwise Log-Likelihood} \usage{ \method{log_lik}{brmsfit}( object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, add_point_estimate = FALSE, cores = NULL, ... ) } \arguments{ \item{object}{A fitted model object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once (the default), or just return the likelihood function along with all data and draws required to compute the log-likelihood separately for each observation. The latter option is rarely useful when calling \code{log_lik} directly, but rather when computing \code{\link{waic}} or \code{\link{loo}}.} \item{combine}{Only relevant in multivariate models. Indicates if the log-likelihoods of the submodels should be combined per observation (i.e. added together; the default) or if the log-likelihoods should be returned separately.} \item{add_point_estimate}{For internal use only. Ensures compatibility with the \code{\link{loo_subsample}} method.} \item{cores}{Number of cores (defaults to \code{1}). On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ Usually, an S x N matrix containing the pointwise log-likelihood draws, where S is the number of draws and N is the number of observations in the data. For multivariate models and if \code{combine} is \code{FALSE}, an S x N x R array is returned, where R is the number of response variables. If \code{pointwise = TRUE}, the output is a function with a \code{draws} attribute containing all relevant data and posterior draws. } \description{ Compute the Pointwise Log-Likelihood } \details{ \code{NA} values within factors in \code{newdata}, are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. 
This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. } brms/man/conditional_effects.brmsfit.Rd0000644000175000017500000003153214111751667020075 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{conditional_effects.brmsfit} \alias{conditional_effects.brmsfit} \alias{marginal_effects} \alias{marginal_effects.brmsfit} \alias{conditional_effects} \alias{plot.brms_conditional_effects} \title{Display Conditional Effects of Predictors} \usage{ \method{conditional_effects}{brmsfit}( x, effects = NULL, conditions = NULL, int_conditions = NULL, re_formula = NA, prob = 0.95, robust = TRUE, method = "posterior_epred", spaghetti = FALSE, surface = FALSE, categorical = FALSE, ordinal = FALSE, transform = NULL, resolution = 100, select_points = 0, too_far = 0, probs = NULL, ... ) conditional_effects(x, ...) \method{plot}{brms_conditional_effects}( x, ncol = NULL, points = FALSE, rug = FALSE, mean = TRUE, jitter_width = 0, stype = c("contour", "raster"), line_args = list(), cat_args = list(), errorbar_args = list(), surface_args = list(), spaghetti_args = list(), point_args = list(), rug_args = list(), facet_args = list(), theme = NULL, ask = TRUE, plot = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{effects}{An optional character vector naming effects (main effects or interactions) for which to compute conditional plots. Interactions are specified by a \code{:} between variable names. If \code{NULL} (the default), plots are generated for all main effects and two-way interactions estimated in the model. When specifying \code{effects} manually, \emph{all} two-way interactions (including grouping variables) may be plotted even if not originally modeled.} \item{conditions}{An optional \code{data.frame} containing variable values to condition on. Each effect defined in \code{effects} will be plotted separately for each row of \code{conditions}. Values in the \code{cond__} column will be used as titles of the subplots. If \code{cond__} is not given, the row names will be used for this purpose instead. It is recommended to only define a few rows in order to keep the plots clear. See \code{\link{make_conditions}} for an easy way to define conditions. If \code{NULL} (the default), numeric variables will be conditionalized by using their means and factors will get their first level assigned. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{int_conditions}{An optional named \code{list} whose elements are vectors of values of the variables specified in \code{effects}. At these values, predictions are evaluated. The names of \code{int_conditions} have to match the variable names exactly. Additionally, the elements of the vectors may be named themselves, in which case their names appear as labels for the conditions in the plots. Instead of vectors, functions returning vectors may be passed and are applied on the original values of the corresponding variable. 
If \code{NULL} (the default), predictions are evaluated at the \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at all categories for factor-like predictors.} \item{re_formula}{A formula containing group-level effects to be considered in the conditional predictions. If \code{NULL}, include all group-level effects; if \code{NA} (default), include no group-level effects.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{robust}{If \code{TRUE} (the default) the median is used as the measure of central tendency. If \code{FALSE} the mean is used instead.} \item{method}{Method used to obtain predictions. Can be set to \code{"posterior_epred"} (the default), \code{"posterior_predict"}, or \code{"posterior_linpred"}. For more details, see the respective function documentations.} \item{spaghetti}{Logical. Indicates if predictions should be visualized via spaghetti plots. Only applied for numeric predictors. If \code{TRUE}, it is recommended to set argument \code{ndraws} to a relatively small value (e.g., \code{100}) in order to reduce computation time.} \item{surface}{Logical. Indicates if interactions or two-dimensional smooths should be visualized as a surface. Defaults to \code{FALSE}. The surface type can be controlled via argument \code{stype} of the related plotting method.} \item{categorical}{Logical. Indicates if effects of categorical or ordinal models should be shown in terms of probabilities of response categories. Defaults to \code{FALSE}.} \item{ordinal}{(Deprecated) Please use argument \code{categorical}. Logical. Indicates if effects in ordinal models should be visualized as a raster with the response categories on the y-axis. Defaults to \code{FALSE}.} \item{transform}{A function or a character string naming a function to be applied to the predicted responses before summary statistics are computed. Only allowed if \code{method = "posterior_predict"}.} \item{resolution}{Number of support points used to generate the plots. Higher resolution leads to smoother plots. Defaults to \code{100}. If \code{surface} is \code{TRUE}, this implies \code{10000} support points for interaction terms, so it might be necessary to reduce \code{resolution} when only little RAM is available.} \item{select_points}{Positive number. Only relevant if \code{points} or \code{rug} are set to \code{TRUE}: Actual data points of numeric variables that are too far away from the values specified in \code{conditions} can be excluded from the plot. Values are scaled into the unit interval and then points more than \code{select_points} from the values in \code{conditions} are excluded. By default, all points are used.} \item{too_far}{Positive number. For surface plots only: Grid points that are too far away from the actual data points can be excluded from the plot. \code{too_far} determines what is too far. The grid is scaled into the unit square and then grid points more than \code{too_far} from the predictor variables are excluded. By default, all grid points are used. Ignored for non-surface plots.} \item{probs}{(Deprecated) The quantiles to be used in the computation of uncertainty intervals. Please use argument \code{prob} instead.} \item{...}{Further arguments such as \code{draw_ids} or \code{ndraws} passed to \code{\link{posterior_predict}} or \code{\link{posterior_epred}}.} \item{ncol}{Number of plots to display per column for each effect.
If \code{NULL} (default), \code{ncol} is computed internally based on the number of rows of \code{conditions}.} \item{points}{Logical. Indicates if the original data points should be added via \code{\link{geom_jitter}}. Default is \code{FALSE}. Note that only those data points will be added that match the specified conditions defined in \code{conditions}. For categorical predictors, the conditions have to match exactly. For numeric predictors, argument \code{select_points} is used to determine which points match a condition.} \item{rug}{Logical. Indicates if a rug representation of predictor values should be added via \code{\link{geom_rug}}. Default is \code{FALSE}. Depends on \code{select_points} in the same way as \code{points} does.} \item{mean}{Logical. Only relevant for spaghetti plots. If \code{TRUE} (the default), display the mean regression line on top of the regression lines for each sample.} \item{jitter_width}{Only used if \code{points = TRUE}: Amount of horizontal jittering of the data points. Mainly useful for ordinal models. Defaults to \code{0}, that is, no jittering.} \item{stype}{Indicates how surface plots should be displayed. Either \code{"contour"} or \code{"raster"}.} \item{line_args}{Only used in plots of continuous predictors: A named list of arguments passed to \code{\link{geom_smooth}}.} \item{cat_args}{Only used in plots of categorical predictors: A named list of arguments passed to \code{\link{geom_point}}.} \item{errorbar_args}{Only used in plots of categorical predictors: A named list of arguments passed to \code{\link{geom_errorbar}}.} \item{surface_args}{Only used in surface plots: A named list of arguments passed to \code{\link{geom_contour}} or \code{\link{geom_raster}} (depending on argument \code{stype}).} \item{spaghetti_args}{Only used in spaghetti plots: A named list of arguments passed to \code{\link{geom_smooth}}.} \item{point_args}{Only used if \code{points = TRUE}: A named list of arguments passed to \code{\link{geom_jitter}}.} \item{rug_args}{Only used if \code{rug = TRUE}: A named list of arguments passed to \code{\link{geom_rug}}.} \item{facet_args}{Only used if multiple conditions are provided: A named list of arguments passed to \code{\link{facet_wrap}}.} \item{theme}{A \code{\link[ggplot2:theme]{theme}} object modifying the appearance of the plots. For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} and \code{\link[bayesplot:theme_default]{theme_default}}.} \item{ask}{Logical; indicates if the user is prompted before a new page is plotted. Only used if \code{plot} is \code{TRUE}.} \item{plot}{Logical; indicates if plots should be plotted directly in the active graphic device. Defaults to \code{TRUE}.} } \value{ An object of class \code{'brms_conditional_effects'}, which is a named list with one data.frame per effect containing all information required to generate conditional effects plots. Among others, these data.frames contain some special variables, namely \code{estimate__} (predicted values of the response), \code{se__} (standard error of the predicted response), \code{lower__} and \code{upper__} (lower and upper bounds of the uncertainty interval of the response), as well as \code{cond__} (used in faceting when \code{conditions} contains multiple rows). The corresponding \code{plot} method returns a named list of \code{\link{ggplot}} objects, which can be further customized using the \pkg{ggplot2} package.
} \description{ Display conditional effects of one or more numeric and/or categorical predictors including two-way interaction effects. } \details{ When creating \code{conditional_effects} for a particular predictor (or interaction of two predictors), one has to choose the values of all other predictors to condition on. By default, the mean is used for continuous variables and the reference category is used for factors, but you may change these values via argument \code{conditions}. This also has an implication for the \code{points} argument: In the created plots, only those points will be shown that correspond to the factor levels actually used in the conditioning, in order not to create the false impression of bad model fit, where it is just due to conditioning on certain factor levels. To fully change colors of the created plots, one has to amend both \code{scale_colour} and \code{scale_fill}. See \code{\link{scale_colour_grey}} or \code{\link{scale_colour_gradient}} for more details. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1 | patient), data = epilepsy, family = poisson()) ## plot all conditional effects plot(conditional_effects(fit), ask = FALSE) ## change colours to grey scale library(ggplot2) me <- conditional_effects(fit, "zBase:Trt") plot(me, plot = FALSE)[[1]] + scale_color_grey() + scale_fill_grey() ## only plot the conditional interaction effect of 'zBase:Trt' ## for different values for 'zAge' conditions <- data.frame(zAge = c(-1, 0, 1)) plot(conditional_effects(fit, effects = "zBase:Trt", conditions = conditions)) ## also incorporate group-level effects variance over patients ## also add data points and a rug representation of predictor values plot(conditional_effects(fit, effects = "zBase:Trt", conditions = conditions, re_formula = NULL), points = TRUE, rug = TRUE) ## change handling of two-way interactions int_conditions <- list( zBase = setNames(c(-2, 1, 0), c("b", "c", "a")) ) conditional_effects(fit, effects = "Trt:zBase", int_conditions = int_conditions) conditional_effects(fit, effects = "Trt:zBase", int_conditions = list(zBase = quantile)) ## fit a model to illustrate how to plot 3-way interactions fit3way <- brm(count ~ zAge * zBase * Trt, data = epilepsy) conditions <- make_conditions(fit3way, "zAge") conditional_effects(fit3way, "zBase:Trt", conditions = conditions) ## only include points close to the specified values of zAge me <- conditional_effects( fit3way, "zBase:Trt", conditions = conditions, select_points = 0.1 ) plot(me, points = TRUE) } } brms/man/fixef.brmsfit.Rd0000644000175000017500000000320113701270367015161 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{fixef.brmsfit} \alias{fixef.brmsfit} \alias{fixef} \title{Extract Population-Level Estimates} \usage{ \method{fixef}{brmsfit}( object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. 
Only used if \code{summary} is \code{TRUE}.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{...}{Currently ignored.} } \value{ If \code{summary} is \code{TRUE}, a matrix returned by \code{\link{posterior_summary}} for the population-level effects. If \code{summary} is \code{FALSE}, a matrix with one row per posterior draw and one column per population-level effect. } \description{ Extract the population-level ('fixed') effects from a \code{brmsfit} object. } \examples{ \dontrun{ fit <- brm(time | cens(censored) ~ age + sex + disease, data = kidney, family = "exponential") fixef(fit) # extract only some coefficients fixef(fit, pars = c("age", "sex")) } } brms/man/get_refmodel.brmsfit.Rd0000644000175000017500000000461214116647701016524 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/projpred.R \name{get_refmodel.brmsfit} \alias{get_refmodel.brmsfit} \title{Projection Predictive Variable Selection: Get Reference Model} \usage{ get_refmodel.brmsfit(object, newdata = NULL, resp = NULL, cvfun = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{cvfun}{Optional cross-validation function (see \code{\link[projpred:get-refmodel]{get_refmodel}} for details). If \code{NULL} (the default), \code{cvfun} is defined internally based on \code{\link{kfold.brmsfit}}.} \item{...}{Further arguments passed to \code{\link[projpred:get-refmodel]{init_refmodel}}.} } \value{ A \code{refmodel} object to be used in conjunction with the \pkg{projpred} package. } \description{ The \code{get_refmodel.brmsfit} method can be used to create the reference model structure which is needed by the \pkg{projpred} package for performing a projection predictive variable selection. This method is called automatically when performing variable selection via \code{\link[projpred:varsel]{varsel}} or \code{\link[projpred:cv_varsel]{cv_varsel}}, so you will rarely need to call it manually yourself. } \details{ Note that the \code{extract_model_data} function used internally by \code{get_refmodel.brmsfit} ignores arguments \code{wrhs}, \code{orhs}, and \code{extract_y}. This is relevant for \code{\link[projpred:predict.refmodel]{predict.refmodel}}, for example. 
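As a minimal sketch (assuming a previously fitted \code{brmsfit} object \code{fit} and a hypothetical data frame \code{nd} of new observations; the \pkg{projpred} package must be installed), the reference model can also be created and used for prediction directly:
\preformatted{
library(projpred)
# build the reference model structure from the brms fit
ref <- get_refmodel(fit)
# predict from the reference model for new data 'nd'
pred <- predict(ref, newdata = nd)
}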
} \examples{ \dontrun{ # fit a simple model fit <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson()) summary(fit) # The following code requires the 'projpred' package to be installed: library(projpred) # perform variable selection without cross-validation vs <- varsel(fit) summary(vs) plot(vs) # perform variable selection with cross-validation cv_vs <- cv_varsel(fit) summary(cv_vs) plot(cv_vs) } } brms/man/fitted.brmsfit.Rd0000644000175000017500000001122614111751667015350 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{fitted.brmsfit} \alias{fitted.brmsfit} \title{Expected Values of the Posterior Predictive Distribution} \usage{ \method{fitted}{brmsfit}( object, newdata = NULL, re_formula = NULL, scale = c("response", "linear"), resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{A formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{scale}{Either \code{"response"} or \code{"linear"}. If \code{"response"}, results are returned on the scale of the response variable. If \code{"linear"}, results are returned on the scale of the linear predictor term, that is, without applying the inverse link function or other transformations.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameter are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameter are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicates whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted \emph{mean} response values. If \code{summary = FALSE} the output resembles those of \code{\link{posterior_epred.brmsfit}}. If \code{summary = TRUE} the output depends on the family: For categorical and ordinal families, the output is an N x E x C array, where N is the number of observations, E is the number of summary statistics, and C is the number of categories. For all other families, the output is an N x E matrix. The number of summary statistics E is equal to \code{2 + length(probs)}: The \code{Estimate} column contains point estimates (either mean or median depending on argument \code{robust}), while the \code{Est.Error} column contains uncertainty estimates (either standard deviation or median absolute deviation depending on argument \code{robust}). The remaining columns starting with \code{Q} contain quantile estimates as specified via argument \code{probs}. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ This method is an alias of \code{\link{posterior_epred.brmsfit}} with additional arguments for obtaining summaries of the computed draws. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## compute expected predictions fitted_values <- fitted(fit) head(fitted_values) ## plot expected predictions against actual response dat <- as.data.frame(cbind(Y = standata(fit)$Y, fitted_values)) ggplot(dat) + geom_point(aes(x = Estimate, y = Y)) } } \seealso{ \code{\link{posterior_epred.brmsfit}} } brms/man/logm1.Rd0000644000175000017500000000070413661463272013443 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{logm1} \alias{logm1} \title{Logarithm with a minus one offset.} \usage{ logm1(x, base = exp(1)) } \arguments{ \item{x}{A numeric or complex vector.} \item{base}{A positive or complex number: the base with respect to which logarithms are computed. Defaults to \emph{e} = \code{exp(1)}.} } \description{ Computes \code{log(x - 1)}. } brms/man/rename_pars.Rd0000644000175000017500000000165014105230573014710 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rename_pars.R \name{rename_pars} \alias{rename_pars} \title{Rename Parameters} \usage{ rename_pars(x) } \arguments{ \item{x}{A \code{brmsfit} object.} } \value{ A \code{brmsfit} object with adjusted parameter names. } \description{ Rename parameters within the \code{stanfit} object after model fitting to ensure reasonable parameter names. This function is usually called automatically by \code{\link{brm}} and users will rarely be required to call it themselves.
} \examples{ \dontrun{ # fit a model manually via rstan scode <- make_stancode(count ~ Trt, data = epilepsy) sdata <- make_standata(count ~ Trt, data = epilepsy) stanfit <- rstan::stan(model_code = scode, data = sdata) # feed the Stan model back into brms fit <- brm(count ~ Trt, data = epilepsy, empty = TRUE) fit$fit <- stanfit fit <- rename_pars(fit) summary(fit) } } brms/man/StudentT.Rd0000644000175000017500000000236514111751667014202 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{StudentT} \alias{StudentT} \alias{dstudent_t} \alias{pstudent_t} \alias{qstudent_t} \alias{rstudent_t} \title{The Student-t Distribution} \usage{ dstudent_t(x, df, mu = 0, sigma = 1, log = FALSE) pstudent_t(q, df, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) qstudent_t(p, df, mu = 0, sigma = 1) rstudent_t(n, df, mu = 0, sigma = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{df}{Vector of degrees of freedom.} \item{mu}{Vector of location values.} \item{sigma}{Vector of scale values.} \item{log, log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the Student-t distribution with location \code{mu}, scale \code{sigma}, and degrees of freedom \code{df}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } \seealso{ \code{\link[stats:TDist]{TDist}} } brms/man/mixture.Rd0000644000175000017500000000677313625767110014133 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/families.R \name{mixture} \alias{mixture} \title{Finite Mixture Families in \pkg{brms}} \usage{ mixture(..., flist = NULL, nmix = 1, order = NULL) } \arguments{ \item{...}{One or more objects providing a description of the response distributions to be combined in the mixture model. These can be family functions, calls to family functions or character strings naming the families. For details of supported families see \code{\link{brmsfamily}}.} \item{flist}{Optional list of objects, which are treated in the same way as objects passed via the \code{...} argument.} \item{nmix}{Optional numeric vector specifying the number of times each family is repeated. If specified, it must have the same length as the number of families passed via \code{...} and \code{flist}.} \item{order}{Ordering constraint to identify mixture components. If \code{'mu'} or \code{TRUE}, population-level intercepts of the mean parameters are ordered in non-ordinal models and fixed to the same value in ordinal models (see details). If \code{'none'} or \code{FALSE}, no ordering constraint is applied. If \code{NULL} (the default), \code{order} is set to \code{'mu'} if all families are the same and \code{'none'} otherwise. Other ordering constraints may be implemented in the future.} } \value{ An object of class \code{mixfamily}. } \description{ Set up a finite mixture family for use in \pkg{brms}. } \details{ Most families supported by \pkg{brms} can be used to form mixtures. The response variable has to be valid for all components of the mixture family. Currently, the number of mixture components has to be specified by the user. It is not yet possible to estimate the number of mixture components from the data. 
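As a small illustrative sketch of the \code{nmix} argument (both calls below are assumed to define the same two-component normal mixture):
\preformatted{
# two identical gaussian components, written out explicitly
mix1 <- mixture(gaussian, gaussian)
# the same mixture, repeating the family via 'nmix'
mix2 <- mixture(gaussian, nmix = 2)
}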
Ordering intercepts in mixtures of ordinal families is not possible as each family has itself a set of vector of intercepts (i.e. ordinal thresholds). Instead, \pkg{brms} will fix the vector of intercepts across components in ordinal mixtures, if desired, so that users can try to identify the mixture model via selective inclusion of predictors. For most mixture models, you may want to specify priors on the population-level intercepts via \code{\link{set_prior}} to improve convergence. In addition, it is sometimes necessary to set \code{inits = 0} in the call to \code{\link{brm}} to allow chains to initialize properly. For more details on the specification of mixture models, see \code{\link{brmsformula}}. } \examples{ \dontrun{ ## simulate some data set.seed(1234) dat <- data.frame( y = c(rnorm(200), rnorm(100, 6)), x = rnorm(300), z = sample(0:1, 300, TRUE) ) ## fit a simple normal mixture model mix <- mixture(gaussian, gaussian) prior <- c( prior(normal(0, 7), Intercept, dpar = mu1), prior(normal(5, 7), Intercept, dpar = mu2) ) fit1 <- brm(bf(y ~ x + z), dat, family = mix, prior = prior, chains = 2) summary(fit1) pp_check(fit1) ## use different predictors for the components fit2 <- brm(bf(y ~ 1, mu1 ~ x, mu2 ~ z), dat, family = mix, prior = prior, chains = 2) summary(fit2) ## fix the mixing proportions fit3 <- brm(bf(y ~ x + z, theta1 = 1, theta2 = 2), dat, family = mix, prior = prior, inits = 0, chains = 2) summary(fit3) pp_check(fit3) ## predict the mixing proportions fit4 <- brm(bf(y ~ x + z, theta2 ~ x), dat, family = mix, prior = prior, inits = 0, chains = 2) summary(fit4) pp_check(fit4) ## compare model fit LOO(fit1, fit2, fit3, fit4) } } brms/man/kfold.brmsfit.Rd0000644000175000017500000001367013701270367015172 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kfold.R \name{kfold.brmsfit} \alias{kfold.brmsfit} \alias{kfold} \title{K-Fold Cross-Validation} \usage{ \method{kfold}{brmsfit}( x, ..., K = 10, Ksub = NULL, folds = NULL, group = NULL, exact_loo = NULL, compare = TRUE, resp = NULL, model_names = NULL, save_fits = FALSE ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{K}{The number of subsets of equal (if possible) size into which the data will be partitioned for performing \eqn{K}-fold cross-validation. The model is refit \code{K} times, each time leaving out one of the \code{K} subsets. If \code{K} is equal to the total number of observations in the data then \eqn{K}-fold cross-validation is equivalent to exact leave-one-out cross-validation.} \item{Ksub}{Optional number of subsets (of those subsets defined by \code{K}) to be evaluated. If \code{NULL} (the default), \eqn{K}-fold cross-validation will be performed on all subsets. If \code{Ksub} is a single integer, \code{Ksub} subsets (out of all \code{K}) subsets will be randomly chosen. If \code{Ksub} consists of multiple integers or a one-dimensional array (created via \code{as.array}) potentially of length one, the corresponding subsets will be used. This argument is primarily useful, if evaluation of all subsets is infeasible for some reason.} \item{folds}{Determines how the subsets are being constructed. Possible values are \code{NULL} (the default), \code{"stratified"}, \code{"grouped"}, or \code{"loo"}. 
May also be a vector of length equal to the number of observations in the data. Alters the way \code{group} is handled. More information is provided in the 'Details' section.} \item{group}{Optional name of a grouping variable or factor in the model. What exactly is done with this variable depends on argument \code{folds}. More information is provided in the 'Details' section.} \item{exact_loo}{Deprecated! Please use \code{folds = "loo"} instead.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{save_fits}{If \code{TRUE}, a component \code{fits} is added to the returned object to store the cross-validated \code{brmsfit} objects and the indices of the omitted observations for each fold. Defaults to \code{FALSE}.} } \value{ \code{kfold} returns an object that has a similar structure as the objects returned by the \code{loo} and \code{waic} methods and can be used with the same post-processing functions. } \description{ Perform exact K-fold cross-validation by refitting the model \eqn{K} times each leaving out one-\eqn{K}th of the original data. Folds can be run in parallel using the \pkg{future} package. } \details{ The \code{kfold} function performs exact \eqn{K}-fold cross-validation. First the data are partitioned into \eqn{K} folds (i.e. subsets) of equal (or as close to equal as possible) size by default. Then the model is refit \eqn{K} times, each time leaving out one of the \code{K} subsets. If \eqn{K} is equal to the total number of observations in the data then \eqn{K}-fold cross-validation is equivalent to exact leave-one-out cross-validation (to which \code{loo} is an efficient approximation). The \code{compare_ic} function is also compatible with the objects returned by \code{kfold}. The subsets can be constructed in multiple different ways: \itemize{ \item If both \code{folds} and \code{group} are \code{NULL}, the subsets are randomly chosen so that they have equal (or as close to equal as possible) size. \item If \code{folds} is \code{NULL} but \code{group} is specified, the data is split up into subsets, each time omitting all observations of one of the factor levels, while ignoring argument \code{K}. \item If \code{folds = "stratified"} the subsets are stratified after \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_stratified}}. \item If \code{folds = "grouped"} the subsets are split by \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_grouped}}. \item If \code{folds = "loo"} exact leave-one-out cross-validation will be performed and \code{K} will be ignored. Further, if \code{group} is specified, all observations corresponding to the factor level of the currently predicted single value are omitted. Thus, in this case, the predicted values are only a subset of the omitted ones. \item If \code{folds} is a numeric vector, it must contain one element per observation in the data. Each element of the vector is an integer in \code{1:K} indicating to which of the \code{K} folds the corresponding observation belongs. 
There are some convenience functions available in the \pkg{loo} package that create integer vectors to use for this purpose (see the Examples section below and also the \link[loo:kfold-helpers]{kfold-helpers} page). } } \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) # throws warning about some pareto k estimates being too high (loo1 <- loo(fit1)) # perform 10-fold cross validation (kfold1 <- kfold(fit1, chains = 1)) # use the future package for parallelization library(future) plan(multiprocess) kfold(fit1, chains = 1) } } \seealso{ \code{\link{loo}}, \code{\link{reloo}} } brms/man/emmeans-brms-helpers.Rd0000644000175000017500000000530414111751667016452 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emmeans.R \name{emmeans-brms-helpers} \alias{emmeans-brms-helpers} \alias{recover_data.brmsfit} \alias{emm_basis.brmsfit} \title{Support Functions for \pkg{emmeans}} \usage{ recover_data.brmsfit( object, data, resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ... ) emm_basis.brmsfit( object, trms, xlev, grid, vcov., resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{data, trms, xlev, grid, vcov.}{Arguments required by \pkg{emmeans}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameters are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{re_formula}{Optional formula containing group-level effects to be considered in the prediction. If \code{NULL}, include all group-level effects; if \code{NA} (default), include no group-level effects.} \item{epred}{Logical. If \code{TRUE} compute predictions of the posterior predictive distribution's mean (see \code{\link{posterior_epred.brmsfit}}) while ignoring arguments \code{dpar} and \code{nlpar}. Defaults to \code{FALSE}.} \item{...}{Additional arguments passed to \pkg{emmeans}.} } \description{ Functions required for compatibility of \pkg{brms} with \pkg{emmeans}. Users are not required to call these functions themselves. Instead, they will be called automatically by the \code{emmeans} function of the \pkg{emmeans} package. } \details{ In order to ensure compatibility of most \pkg{brms} models with \pkg{emmeans}, predictions are not generated 'manually' via a design matrix and coefficient vector, but rather via \code{\link{posterior_linpred.brmsfit}}. This appears to generally work well, but note that it produces an `.@linfct` slot that contains the computed predictions as columns instead of the coefficients. 
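For instance (a sketch only, assuming \code{fit} is a fitted \code{brmsfit} object such as the one created in the examples below), this slot can be inspected directly:
\preformatted{
library(emmeans)
rg <- ref_grid(fit)
# columns of 'linfct' hold posterior predictions rather than
# coefficients of a design matrix
dim(rg@linfct)
}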
} \examples{ \dontrun{ fit <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), data = kidney, family = lognormal()) summary(fit) # summarize via 'emmeans' library(emmeans) rg <- ref_grid(fit) em <- emmeans(rg, "disease") summary(em, point.est = mean) # obtain estimates for the posterior predictive distribution's mean epred <- emmeans(fit, "disease", epred = TRUE) summary(epred, point.est = mean) } } brms/man/diagnostic-quantities.Rd0000644000175000017500000000304714050266727016736 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostics.R \name{diagnostic-quantities} \alias{diagnostic-quantities} \alias{log_posterior} \alias{nuts_params} \alias{rhat} \alias{neff_ratio} \alias{log_posterior.brmsfit} \alias{nuts_params.brmsfit} \alias{rhat.brmsfit} \alias{neff_ratio.brmsfit} \title{Extract Diagnostic Quantities of \pkg{brms} Models} \usage{ \method{log_posterior}{brmsfit}(object, ...) \method{nuts_params}{brmsfit}(object, pars = NULL, ...) \method{rhat}{brmsfit}(object, pars = NULL, ...) \method{neff_ratio}{brmsfit}(object, pars = NULL, ...) } \arguments{ \item{object}{A \code{brmsfit} object.} \item{...}{Arguments passed to individual methods.} \item{pars}{An optional character vector of parameter names. For \code{nuts_params} these will be NUTS sampler parameter names rather than model parameters. If pars is omitted all parameters are included.} } \value{ The exact form of the output depends on the method. } \description{ Extract quantities that can be used to diagnose sampling behavior of the algorithms applied by \pkg{Stan} at the back-end of \pkg{brms}. } \details{ For more details see \code{\link[bayesplot:bayesplot-extractors]{bayesplot-extractors}}. } \examples{ \dontrun{ fit <- brm(time ~ age * sex, data = kidney) lp <- log_posterior(fit) head(lp) np <- nuts_params(fit) str(np) # extract the number of divergence transitions sum(subset(np, Parameter == "divergent__")$Value) head(rhat(fit)) head(neff_ratio(fit)) } } brms/man/VonMises.Rd0000644000175000017500000000216514111751667014171 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{VonMises} \alias{VonMises} \alias{dvon_mises} \alias{pvon_mises} \alias{rvon_mises} \title{The von Mises Distribution} \usage{ dvon_mises(x, mu, kappa, log = FALSE) pvon_mises(q, mu, kappa, lower.tail = TRUE, log.p = FALSE, acc = 1e-20) rvon_mises(n, mu, kappa) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of location values.} \item{kappa}{Vector of precision values.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{acc}{Accuracy of numerical approximations.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the von Mises distribution with location \code{mu}, and precision \code{kappa}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. 
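A brief usage sketch of these functions (parameter values chosen arbitrarily for illustration):
\preformatted{
# density and distribution function at a few angles
dvon_mises(c(-pi / 2, 0, pi / 2), mu = 0, kappa = 2)
pvon_mises(c(-pi / 2, 0, pi / 2), mu = 0, kappa = 2)
# draw random values
rvon_mises(5, mu = 0, kappa = 2)
}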
} brms/man/is.brmsfit_multiple.Rd0000644000175000017500000000055713701270367016421 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \name{is.brmsfit_multiple} \alias{is.brmsfit_multiple} \title{Checks if argument is a \code{brmsfit_multiple} object} \usage{ is.brmsfit_multiple(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsfit_multiple} object } brms/man/add_ic.Rd0000644000175000017500000000225313701270367013624 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{add_loo} \alias{add_loo} \alias{add_waic} \alias{add_ic} \alias{add_ic.brmsfit} \alias{add_ic<-} \title{Add model fit criteria to model objects} \usage{ add_loo(x, model_name = NULL, ...) add_waic(x, model_name = NULL, ...) add_ic(x, ...) \method{add_ic}{brmsfit}(x, ic = "loo", model_name = NULL, ...) add_ic(x, ...) <- value } \arguments{ \item{x}{An \R object typically of class \code{brmsfit}.} \item{model_name}{Optional name of the model. If \code{NULL} (the default) the name is taken from the call to \code{x}.} \item{...}{Further arguments passed to the underlying functions computing the model fit criteria.} \item{ic, value}{Names of model fit criteria to compute. Currently supported are \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"R2"} (R-squared), and \code{"marglik"} (log marginal likelihood).} } \value{ An object of the same class as \code{x}, but with model fit criteria added for later usage. Previously computed criterion objects will be overwritten. } \description{ Deprecated aliases of \code{\link{add_criterion}}. } brms/man/do_call.Rd0000644000175000017500000000174514111751667014026 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{do_call} \alias{do_call} \title{Execute a Function Call} \usage{ do_call(what, args, pkg = NULL, envir = parent.frame()) } \arguments{ \item{what}{Either a function or a non-empty character string naming the function to be called.} \item{args}{A list of arguments to the function call. The names attribute of \code{args} gives the argument names.} \item{pkg}{Optional name of the package in which to search for the function if \code{what} is a character string.} \item{envir}{An environment within which to evaluate the call.} } \value{ The result of the (evaluated) function call. } \description{ Execute a function call similar to \code{\link{do.call}}, but without deparsing function arguments. For large number of arguments (i.e., more than a few thousand) this function currently is somewhat inefficient and should be used with care in this case. } \keyword{internal} brms/man/loo_subsample.brmsfit.Rd0000644000175000017500000000324114010776134016725 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_subsample.R \name{loo_subsample.brmsfit} \alias{loo_subsample.brmsfit} \alias{loo_subsample} \title{Efficient approximate leave-one-out cross-validation (LOO) using subsampling} \usage{ \method{loo_subsample}{brmsfit}(x, ..., compare = TRUE, resp = NULL, model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. 
In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \description{ Efficient approximate leave-one-out cross-validation (LOO) using subsampling } \details{ More details can be found on \code{\link[loo:loo_subsample]{loo_subsample}}. } \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (loo1 <- loo_subsample(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (loo2 <- loo_subsample(fit2)) # compare both models loo_compare(loo1, loo2) } } brms/man/is.mvbrmsterms.Rd0000644000175000017500000000061113701270367015410 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{is.mvbrmsterms} \alias{is.mvbrmsterms} \title{Checks if argument is a \code{mvbrmsterms} object} \usage{ is.mvbrmsterms(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{mvbrmsterms} object } \seealso{ \code{\link[brms:brmsterms]{brmsterms}} } brms/man/control_params.Rd0000644000175000017500000000140613701270367015443 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostics.R \name{control_params} \alias{control_params} \alias{control_params.brmsfit} \title{Extract Control Parameters of the NUTS Sampler} \usage{ control_params(x, ...) \method{control_params}{brmsfit}(x, pars = NULL, ...) } \arguments{ \item{x}{An \R object} \item{...}{Currently ignored.} \item{pars}{Optional names of the control parameters to be returned. If \code{NULL} (the default) all control parameters are returned. See \code{\link[rstan:stan]{stan}} for more details.} } \value{ A named \code{list} with control parameter values. } \description{ Extract control parameters of the NUTS sampler such as \code{adapt_delta} or \code{max_treedepth}. } brms/man/print.brmsprior.Rd0000644000175000017500000000105414010776135015567 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{print.brmsprior} \alias{print.brmsprior} \title{Print method for \code{brmsprior} objects} \usage{ \method{print}{brmsprior}(x, show_df = NULL, ...) 
} \arguments{ \item{x}{An object of class \code{brmsprior}.} \item{show_df}{Logical; Print priors as a single \code{data.frame} (\code{TRUE}) or as a sequence of sampling statements (\code{FALSE})?} \item{...}{Currently ignored.} } \description{ Print method for \code{brmsprior} objects } brms/man/waic.brmsfit.Rd0000644000175000017500000000574713701270367015024 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{waic.brmsfit} \alias{waic.brmsfit} \alias{waic} \alias{WAIC} \alias{WAIC.brmsfit} \title{Widely Applicable Information Criterion (WAIC)} \usage{ \method{waic}{brmsfit}( x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once or separately for each observation. The latter approach is usually considerably slower but requires much less working memory. Accordingly, if one runs into memory issues, \code{pointwise = TRUE} is the way to go.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ If just one object is provided, an object of class \code{loo}. If multiple objects are provided, an object of class \code{loolist}. } \description{ Compute the widely applicable information criterion (WAIC) based on the posterior likelihood using the \pkg{loo} package. For more details see \code{\link[loo:waic]{waic}}. } \details{ See \code{\link{loo_compare}} for details on model comparisons. For \code{brmsfit} objects, \code{WAIC} is an alias of \code{waic}. Use method \code{\link[brms:add_criterion]{add_criterion}} to store information criteria in the fitted model object for later usage. } \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (waic1 <- waic(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (waic2 <- waic(fit2)) # compare both models loo_compare(waic1, waic2) } } \references{ Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. In Statistics and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. Gelman, A., Hwang, J., & Vehtari, A. (2014). Understanding predictive information criteria for Bayesian models. Statistics and Computing, 24, 997-1016. Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. The Journal of Machine Learning Research, 11, 3571-3594. 
} brms/man/posterior_epred.brmsfit.Rd0000644000175000017500000001012514111751667017273 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{posterior_epred.brmsfit} \alias{posterior_epred.brmsfit} \alias{pp_expect} \alias{posterior_epred} \title{Expected Values of the Posterior Predictive Distribution} \usage{ \method{posterior_epred}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameters are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted \emph{mean} response values. For categorical and ordinal models, the output is an S x N x C array. Otherwise, the output is an S x N matrix, where S is the number of posterior draws, N is the number of observations, and C is the number of categories. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ Compute posterior draws of the expected value/mean of the posterior predictive distribution. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. By definition, these predictions have smaller variance than the posterior predictions performed by the \code{\link{posterior_predict.brmsfit}} method. This is because only the uncertainty in the mean is incorporated in the draws computed by \code{posterior_epred} while any residual error is ignored. However, the estimated means of both methods averaged across draws should be very similar. } \details{ \code{NA} values within factors in \code{newdata}, are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. 
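As a minimal sketch of this behavior (using a small simulated data set; all variable names are purely illustrative):
\preformatted{
dat <- data.frame(y = rnorm(100), g = factor(rep(c("a", "b"), 50)))
contrasts(dat$g) <- contr.sum(2)
fit <- brm(y ~ g, data = dat)
# an NA factor level yields predictions of the grand mean
nd <- data.frame(g = factor(NA, levels = levels(dat$g)))
ppe <- posterior_epred(fit, newdata = nd)
}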
In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## compute expected predictions ppe <- posterior_epred(fit) str(ppe) } } brms/man/bayes_R2.brmsfit.Rd0000644000175000017500000000450414111751667015540 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayes_R2.R \name{bayes_R2.brmsfit} \alias{bayes_R2.brmsfit} \alias{bayes_R2} \title{Compute a Bayesian version of R-squared for regression models} \usage{ \method{bayes_R2}{brmsfit}( object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}, which is used in the computation of the R-squared values.} } \value{ If \code{summary = TRUE}, an M x C matrix is returned (M = number of response variables and c = \code{length(probs) + 2}) containing summary statistics of the Bayesian R-squared values. If \code{summary = FALSE}, the posterior draws of the Bayesian R-squared values are returned in an S x M matrix (S is the number of draws). } \description{ Compute a Bayesian version of R-squared for regression models } \details{ For an introduction to the approach, see Gelman et al. (2018) and \url{https://github.com/jgabry/bayes_R2/}. } \examples{ \dontrun{ fit <- brm(mpg ~ wt + cyl, data = mtcars) summary(fit) bayes_R2(fit) # compute R2 with new data nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) bayes_R2(fit, newdata = nd) } } \references{ Andrew Gelman, Ben Goodrich, Jonah Gabry & Aki Vehtari. (2018). R-squared for Bayesian regression models, \emph{The American Statistician}. 
\code{10.1080/00031305.2018.1549100} (Preprint available at \url{https://stat.columbia.edu/~gelman/research/published/bayes_R2_v3.pdf}) } brms/man/SkewNormal.Rd0000644000175000017500000000352214111751667014506 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{SkewNormal} \alias{SkewNormal} \alias{dskew_normal} \alias{pskew_normal} \alias{qskew_normal} \alias{rskew_normal} \title{The Skew-Normal Distribution} \usage{ dskew_normal( x, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, log = FALSE ) pskew_normal( q, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE ) qskew_normal( p, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE, tol = 1e-08 ) rskew_normal(n, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of mean values.} \item{sigma}{Vector of standard deviation values.} \item{alpha}{Vector of skewness values.} \item{xi}{Optional vector of location values. If \code{NULL} (the default), will be computed internally.} \item{omega}{Optional vector of scale values. If \code{NULL} (the default), will be computed internally.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x).} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{tol}{Tolerance of the approximation used in the computation of quantiles.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the skew-normal distribution with mean \code{mu}, standard deviation \code{sigma}, and skewness \code{alpha}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/combine_models.Rd0000644000175000017500000000177014105230573015375 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brm_multiple.R \name{combine_models} \alias{combine_models} \title{Combine Models fitted with \pkg{brms}} \usage{ combine_models(..., mlist = NULL, check_data = TRUE) } \arguments{ \item{...}{One or more \code{brmsfit} objects.} \item{mlist}{Optional list of one or more \code{brmsfit} objects.} \item{check_data}{Logical; indicates if the data should be checked for being the same across models (defaults to \code{TRUE}). Setting it to \code{FALSE} may be useful, for instance, when combining models fitted on multiple imputed data sets.} } \value{ A \code{brmsfit} object. } \description{ Combine multiple \code{brmsfit} objects that fitted the same model. This is useful, for instance, when models have been run manually in parallel. } \details{ This function just takes the first model and replaces its \code{stanfit} object (slot \code{fit}) by the combined \code{stanfit} objects of all models. } brms/man/cs.Rd0000644000175000017500000000166513701270367013034 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-cs.R \name{cs} \alias{cs} \alias{cse} \title{Category Specific Predictors in \pkg{brms} Models} \usage{ cs(expr) } \arguments{ \item{expr}{Expression containing predictors for which category specific effects should be estimated.
For evaluation, \R formula syntax is applied.} } \description{ Category Specific Predictors in \pkg{brms} Models } \details{ For detailed documentation see \code{help(brmsformula)} as well as \code{vignette("brms_overview")}. This function is almost solely useful when called in formulas passed to the \pkg{brms} package. } \examples{ \dontrun{ fit <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("cloglog"), prior = set_prior("normal(0,5)"), chains = 2) summary(fit) plot(fit, ask = FALSE) } } \seealso{ \code{\link{brmsformula}} } brms/man/is.mvbrmsformula.Rd0000644000175000017500000000053613661463272015735 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{is.mvbrmsformula} \alias{is.mvbrmsformula} \title{Checks if argument is a \code{mvbrmsformula} object} \usage{ is.mvbrmsformula(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{mvbrmsformula} object } brms/man/kidney.Rd0000644000175000017500000000364314105230573013702 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{kidney} \alias{kidney} \title{Infections in kidney patients} \format{ A data frame of 76 observations containing information on the following 7 variables. \describe{ \item{time}{The time to first or second recurrence of the infection, or the time of censoring} \item{recur}{A factor of levels \code{1} or \code{2} indicating if the infection recurred for the first or second time for this patient} \item{censored}{Either \code{0} or \code{1}, where \code{0} indicates no censoring of recurrence time and \code{1} indicates right censoring} \item{patient}{The patient number} \item{age}{The age of the patient} \item{sex}{The sex of the patient} \item{disease}{A factor of levels \code{other, GN, AN}, and \code{PKD} specifying the type of disease} } } \source{ McGilchrist, C. A., & Aisbett, C. W. (1991). Regression with frailty in survival analysis. \emph{Biometrics}, 47(2), 461-466. } \usage{ kidney } \description{ This dataset, originally discussed in McGilchrist and Aisbett (1991), describes the first and second (possibly right censored) recurrence time of infection in kidney patients using portable dialysis equipment. In addition, information on the risk variables age, sex and disease type is provided. 
} \examples{ \dontrun{ ## performing surivival analysis using the "weibull" family fit1 <- brm(time | cens(censored) ~ age + sex + disease, data = kidney, family = weibull, inits = "0") summary(fit1) plot(fit1) ## adding random intercepts over patients fit2 <- brm(time | cens(censored) ~ age + sex + disease + (1|patient), data = kidney, family = weibull(), inits = "0", prior = set_prior("cauchy(0,2)", class = "sd")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/brmsformula-helpers.Rd0000644000175000017500000001137213701270367016414 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{brmsformula-helpers} \alias{brmsformula-helpers} \alias{bf-helpers} \alias{nlf} \alias{lf} \alias{set_nl} \alias{set_rescor} \alias{acformula} \alias{set_mecor} \title{Linear and Non-linear formulas in \pkg{brms}} \usage{ nlf(formula, ..., flist = NULL, dpar = NULL, resp = NULL, loop = NULL) lf( ..., flist = NULL, dpar = NULL, resp = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL ) acformula(autocor, resp = NULL) set_nl(nl = TRUE, dpar = NULL, resp = NULL) set_rescor(rescor = TRUE) set_mecor(mecor = TRUE) } \arguments{ \item{formula}{Non-linear formula for a distributional parameter. The name of the distributional parameter can either be specified on the left-hand side of \code{formula} or via argument \code{dpar}.} \item{...}{Additional \code{formula} objects to specify predictors of non-linear and distributional parameters. Formulas can either be named directly or contain names on their left-hand side. Alternatively, it is possible to fix parameters to certain values by passing numbers or character strings in which case arguments have to be named to provide the parameter names. See 'Details' for more information.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{dpar}{Optional character string specifying the distributional parameter to which the formulas passed via \code{...} and \code{flist} belong.} \item{resp}{Optional character string specifying the response variable to which the formulas passed via \code{...} and \code{flist} belong. Only relevant in multivariate models.} \item{loop}{Logical; Only used in non-linear models. Indicates if the computation of the non-linear formula should be done inside (\code{TRUE}) or outside (\code{FALSE}) a loop over observations. Defaults to \code{TRUE}.} \item{center}{Logical; Indicates if the population-level design matrix should be centered, which usually increases sampling efficiency. See the 'Details' section for more information. Defaults to \code{TRUE} for distributional parameters and to \code{FALSE} for non-linear parameters.} \item{cmc}{Logical; Indicates whether automatic cell-mean coding should be enabled when removing the intercept by adding \code{0} to the right-hand of model formulas. Defaults to \code{TRUE} to mirror the behavior of standard \R formula parsing.} \item{sparse}{Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased.} \item{decomp}{Optional name of the decomposition used for the population-level design matrix. Defaults to \code{NULL} that is no decomposition. 
Other options currently available are \code{"QR"} for the QR decomposition that helps in fitting models with highly correlated predictors.} \item{autocor}{A one sided formula containing autocorrelation terms. All none autocorrelation terms in \code{autocor} will be silently ignored.} \item{nl}{Logical; Indicates whether \code{formula} should be treated as specifying a non-linear model. By default, \code{formula} is treated as an ordinary linear model formula.} \item{rescor}{Logical; Indicates if residual correlation between the response variables should be modeled. Currently this is only possible in multivariate \code{gaussian} and \code{student} models. Only relevant in multivariate models.} \item{mecor}{Logical; Indicates if correlations between latent variables defined by \code{\link{me}} terms should be modeled. Defaults to \code{TRUE}.} } \value{ For \code{lf} and \code{nlf} a \code{list} that can be passed to \code{\link[brms:brmsformula]{brmsformula}} or added to an existing \code{brmsformula} or \code{mvbrmsformula} object. For \code{set_nl} and \code{set_rescor} a logical value that can be added to an existing \code{brmsformula} or \code{mvbrmsformula} object. } \description{ Helper functions to specify linear and non-linear formulas for use with \code{\link[brms:brmsformula]{brmsformula}}. } \examples{ # add more formulas to the model bf(y ~ 1) + nlf(sigma ~ a * exp(b * x)) + lf(a ~ x, b ~ z + (1|g)) + gaussian() # specify 'nl' later on bf(y ~ a * inv_logit(x * b)) + lf(a + b ~ z) + set_nl(TRUE) # specify a multivariate model bf(y1 ~ x + (1|g)) + bf(y2 ~ z) + set_rescor(TRUE) # add autocorrelation terms bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(W)) } \seealso{ \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/fcor.Rd0000644000175000017500000000223413701270367013351 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{fcor} \alias{fcor} \title{Fixed residual correlation (FCOR) structures} \usage{ fcor(M) } \arguments{ \item{M}{Known correlation/covariance matrix of the response variable. If a vector is passed, it will be used as diagonal entries (variances) and correlations/covariances will be set to zero. The actual covariance matrix used in the likelihood is obtained by multiplying \code{M} by the square of the residual standard deviation parameter \code{sigma} estimated as part of the model.} } \value{ An object of class \code{'fcor_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a fixed residual correlation (FCOR) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with FCOR terms. } \examples{ \dontrun{ dat <- data.frame(y = rnorm(3)) V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) fit <- brm(y ~ 1 + fcor(V), data = dat, data2 = list(V = V)) } } \seealso{ \code{\link{autocor-terms}} } brms/man/validate_newdata.Rd0000644000175000017500000000421014111751667015713 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-helpers.R \name{validate_newdata} \alias{validate_newdata} \title{Validate New Data} \usage{ validate_newdata( newdata, object, re_formula = NULL, allow_new_levels = FALSE, newdata2 = NULL, resp = NULL, check_response = TRUE, incl_autocor = TRUE, group_vars = NULL, req_vars = NULL, ... 
) } \arguments{ \item{newdata}{A \code{data.frame} containing new data to be validated.} \item{object}{A \code{brmsfit} object.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{allow_new_levels}{A flag indicating if new levels of group-level effects are allowed (defaults to \code{FALSE}). Only relevant if \code{newdata} is provided.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check_response}{Logical; Indicates if response variables should be checked as well. Defaults to \code{TRUE}.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{group_vars}{Optional names of grouping variables to be validated. Defaults to all grouping variables in the model.} \item{req_vars}{Optional names of variables required in \code{newdata}. If \code{NULL} (the default), all variables in the original data are required (unless ignored for some other reason).} \item{...}{Currently ignored.} } \value{ A validated \code{'data.frame'} based on \code{newdata}. } \description{ Validate new data passed to post-processing methods of \pkg{brms}. Unless you are a package developer, you will rarely need to call \code{validate_newdata} directly. } brms/man/mvbrmsformula.Rd0000644000175000017500000000274513565500267015326 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{mvbrmsformula} \alias{mvbrmsformula} \alias{mvbf} \title{Set up a multivariate model formula for use in \pkg{brms}} \usage{ mvbrmsformula(..., flist = NULL, rescor = NULL) } \arguments{ \item{...}{Objects of class \code{formula} or \code{brmsformula}, each specifying a univariate model. See \code{\link{brmsformula}} for details on how to specify univariate models.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{rescor}{Logical; Indicates if residual correlation between the response variables should be modeled. Currently, this is only possible in multivariate \code{gaussian} and \code{student} models. If \code{NULL} (the default), \code{rescor} is internally set to \code{TRUE} when possible.} } \value{ An object of class \code{mvbrmsformula}, which is essentially a \code{list} containing all model formulas as well as some additional information for multivariate models. } \description{ Set up a multivariate model formula for use in the \pkg{brms} package allowing to define (potentially non-linear) additive multilevel models for all parameters of the assumed response distributions. } \details{ See \code{vignette("brms_multivariate")} for a case study. 
} \examples{ bf1 <- bf(y1 ~ x + (1|g)) bf2 <- bf(y2 ~ s(z)) mvbf(bf1, bf2) } \seealso{ \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/Hurdle.Rd0000644000175000017500000000327714105230573013645 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Hurdle} \alias{Hurdle} \alias{dhurdle_poisson} \alias{phurdle_poisson} \alias{dhurdle_negbinomial} \alias{phurdle_negbinomial} \alias{dhurdle_gamma} \alias{phurdle_gamma} \alias{dhurdle_lognormal} \alias{phurdle_lognormal} \title{Hurdle Distributions} \usage{ dhurdle_poisson(x, lambda, hu, log = FALSE) phurdle_poisson(q, lambda, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_negbinomial(x, mu, shape, hu, log = FALSE) phurdle_negbinomial(q, mu, shape, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_gamma(x, shape, scale, hu, log = FALSE) phurdle_gamma(q, shape, scale, hu, lower.tail = TRUE, log.p = FALSE) dhurdle_lognormal(x, mu, sigma, hu, log = FALSE) phurdle_lognormal(q, mu, sigma, hu, lower.tail = TRUE, log.p = FALSE) } \arguments{ \item{x}{Vector of quantiles.} \item{hu}{hurdle probability} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{q}{Vector of quantiles.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{mu, lambda}{location parameter} \item{shape}{shape parameter} \item{sigma, scale}{scale parameter} } \description{ Density and distribution functions for hurdle distributions. } \details{ The density of a hurdle distribution can be specified as follows. If \eqn{x = 0} set \eqn{f(x) = \theta}. Else set \eqn{f(x) = (1 - \theta) * g(x) / (1 - G(0))} where \eqn{g(x)} and \eqn{G(x)} are the density and distribution function of the non-hurdle part, respectively. } brms/man/stancode.brmsfit.Rd0000644000175000017500000000212214111751667015664 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_stancode.R \name{stancode.brmsfit} \alias{stancode.brmsfit} \alias{stancode} \title{Extract Stan model code} \usage{ \method{stancode}{brmsfit}(object, version = TRUE, regenerate = NULL, threads = NULL, ...) stancode(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{version}{Logical; indicates if the first line containing the \pkg{brms} version number should be included. Defaults to \code{TRUE}.} \item{regenerate}{Logical; indicates if the Stan code should be regenerated with the current \pkg{brms} version. By default, \code{regenerate} will be \code{FALSE} unless required to be \code{TRUE} by other arguments.} \item{threads}{Controls whether the Stan code should be threaded. See \code{\link{threading}} for details.} \item{...}{Further arguments passed to \code{\link{make_stancode}} if the Stan code is regenerated.} } \value{ Stan model code for further processing. } \description{ Extract Stan code that was used to specify the model. } brms/man/loss.Rd0000644000175000017500000000312714105230573013374 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{loss} \alias{loss} \title{Cumulative Insurance Loss Payments} \format{ A data frame of 55 observations containing information on the following 4 variables. 
\describe{ \item{AY}{Origin year of the insurance (1991 to 2000)} \item{dev}{Deviation from the origin year in months} \item{cum}{Cumulative loss payments} \item{premium}{Achieved premiums for the given origin year} } } \source{ Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. \emph{CAS Research Papers}. } \usage{ loss } \description{ This dataset, discussed in Gesmann & Morris (2020), contains cumulative insurance loss payments over the course of ten years. } \examples{ \dontrun{ # non-linear model to predict cumulative loss payments fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) # basic summaries summary(fit_loss) conditional_effects(fit_loss) # plot predictions per origin year conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) } } \keyword{datasets} brms/man/posterior_predict.brmsfit.Rd0000644000175000017500000001267414111751667017641 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{posterior_predict.brmsfit} \alias{posterior_predict.brmsfit} \alias{posterior_predict} \title{Draws from the Posterior Predictive Distribution} \usage{ \method{posterior_predict}{brmsfit}( object, newdata = NULL, re_formula = NULL, re.form = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{transform}{(Deprecated) A function or a character string naming a function to be applied on the predicted responses before summary statistics are computed.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{negative_rt}{Only relevant for Wiener diffusion models. A flag indicating whether response times of responses on the lower boundary should be returned as negative values. This allows to distinguish responses on the upper and lower boundary. Defaults to \code{FALSE}.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. 
Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{ntrys}{Parameter used in rejection sampling for truncated discrete models only (defaults to \code{5}). See Details for more information.} \item{cores}{Number of cores (defaults to \code{1}). On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted response values. In univariate models, the output is an S x N matrix, where S is the number of posterior draws and N is the number of observations. In multivariate models, an additional dimension is added to the output which indexes along the different response variables. } \description{ Compute posterior draws of the posterior predictive distribution. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. By definition, these draws have higher variance than draws of the means of the posterior predictive distribution computed by \code{\link{posterior_epred.brmsfit}}. This is because the residual error is incorporated in \code{posterior_predict}. However, the estimated means of both methods averaged across draws should be very similar. } \details{ \code{NA} values within factors in \code{newdata} are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding. In multilevel models, it is possible to allow new levels of grouping factors to be used in the predictions. This can be controlled via argument \code{allow_new_levels}. New levels can be sampled in multiple ways, which can be controlled via argument \code{sample_new_levels}. Both of these arguments are documented in \code{\link{prepare_predictions}} along with several other useful arguments to control specific aspects of the predictions. For truncated discrete models only: In the absence of any general algorithm to sample from truncated discrete distributions, rejection sampling is applied in this special case. This means that values are sampled until a value lies within the defined truncation boundaries. In practice, this procedure may be rather slow (especially in \R). Thus, we try to do approximate rejection sampling by sampling each value \code{ntrys} times and then selecting a valid value. If all values are invalid, the closest boundary is used instead. If there are more than a few of these pathological cases, a warning will occur suggesting to increase argument \code{ntrys}.
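For example (the model, data, and variable names below are purely illustrative placeholders, not part of the original documentation), \code{ntrys} could be raised for a truncated count model as follows: \preformatted{
# hypothetical truncated Poisson model; 'y', 'x', and 'dat' are placeholders
fit_trunc <- brm(y | trunc(ub = 10) ~ x, data = dat, family = poisson())
# increase 'ntrys' if warnings about invalid draws occur
pp <- posterior_predict(fit_trunc, ntrys = 20)
}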
} \examples{ \dontrun{ ## fit a model fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), data = kidney, family = "exponential", inits = "0") ## predicted responses pp <- posterior_predict(fit) str(pp) ## predicted responses excluding the group-level effect of age pp <- posterior_predict(fit, re_formula = ~ (1 | patient)) str(pp) ## predicted responses of patient 1 for new data newdata <- data.frame( sex = factor(c("male", "female")), age = c(20, 50), patient = c(1, 1) ) pp <- posterior_predict(fit, newdata = newdata) str(pp) } } brms/man/ranef.brmsfit.Rd0000644000175000017500000000362613701270367015166 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{ranef.brmsfit} \alias{ranef.brmsfit} \alias{ranef} \title{Extract Group-Level Estimates} \usage{ \method{ranef}{brmsfit}( object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, groups = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{groups}{Optional names of grouping variables for which to extract effects.} \item{...}{Currently ignored.} } \value{ A list of 3D arrays (one per grouping factor). If \code{summary} is \code{TRUE}, the 1st dimension contains the factor levels, the 2nd dimension contains the summary statistics (see \code{\link{posterior_summary}}), and the 3rd dimension contains the group-level effects. If \code{summary} is \code{FALSE}, the 1st dimension contains the posterior draws, the 2nd dimension contains the factor levels, and the 3rd dimension contains the group-level effects. } \description{ Extract the group-level ('random') effects of each level from a \code{brmsfit} object. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) ranef(fit) } } brms/man/Shifted_Lognormal.Rd0000644000175000017500000000272614111751667016031 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{Shifted_Lognormal} \alias{Shifted_Lognormal} \alias{dshifted_lnorm} \alias{pshifted_lnorm} \alias{qshifted_lnorm} \alias{rshifted_lnorm} \title{The Shifted Log Normal Distribution} \usage{ dshifted_lnorm(x, meanlog = 0, sdlog = 1, shift = 0, log = FALSE) pshifted_lnorm( q, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE ) qshifted_lnorm( p, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE ) rshifted_lnorm(n, meanlog = 0, sdlog = 1, shift = 0) } \arguments{ \item{x, q}{Vector of quantiles.} \item{meanlog}{Vector of means.} \item{sdlog}{Vector of standard deviations.} \item{shift}{Vector of shifts.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{p}{Vector of probabilities.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, quantile function and random generation for the shifted log normal distribution with mean \code{meanlog}, standard deviation \code{sdlog}, and shift parameter \code{shift}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/mmc.Rd0000644000175000017500000000210114105230573013157 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{mmc} \alias{mmc} \title{Multi-Membership Covariates} \usage{ mmc(...) } \arguments{ \item{...}{One or more terms containing covariates corresponding to the grouping levels specified in \code{\link{mm}}.} } \value{ A matrix with covariates as columns. } \description{ Specify covariates that vary over different levels of multi-membership grouping factors thus requiring special treatment. This function is almost solely useful, when called in combination with \code{\link{mm}}. Outside of multi-membership terms it will behave very much like \code{\link{cbind}}. } \examples{ \dontrun{ # simulate some data dat <- data.frame( y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) ) # multi-membership model with level specific covariate values dat$xc <- (dat$x1 + dat$x2) / 2 fit <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) summary(fit) } } \seealso{ \code{\link{mm}} } brms/man/autocor-terms.Rd0000644000175000017500000000257513701270367015234 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{autocor-terms} \alias{autocor-terms} \title{Autocorrelation structures} \description{ Specify autocorrelation terms in \pkg{brms} models. Currently supported terms are \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, \code{\link{cosy}}, \code{\link{sar}}, \code{\link{car}}, and \code{\link{fcor}}. Terms can be directly specified within the formula, or passed to the \code{autocor} argument of \code{\link{brmsformula}} in the form of a one-sided formula. For deprecated ways of specifying autocorrelation terms, see \code{\link{cor_brms}}. } \details{ The autocor term functions are almost solely useful when called in formulas passed to the \pkg{brms} package. They do not evaluate its arguments -- but exist purely to help set up a model with autocorrelation terms. 
} \examples{ # specify autocor terms within the formula y ~ x + arma(p = 1, q = 1) + car(M) # specify autocor terms in the 'autocor' argument bf(y ~ x, autocor = ~ arma(p = 1, q = 1) + car(M)) # specify autocor terms via 'acformula' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(M)) } \seealso{ \code{\link{brmsformula}}, \code{\link{acformula}}, \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, \code{\link{cosy}}, \code{\link{sar}}, \code{\link{car}}, \code{\link{fcor}} } brms/man/loo_moment_match.brmsfit.Rd0000644000175000017500000000532314111751667017416 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo_moment_match.R \name{loo_moment_match.brmsfit} \alias{loo_moment_match.brmsfit} \alias{loo_moment_match} \title{Moment matching for efficient approximate leave-one-out cross-validation} \usage{ \method{loo_moment_match}{brmsfit}( x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{loo}{An object of class \code{loo} originally created from \code{x}.} \item{k_threshold}{The threshold at which Pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{check}{Logical; If \code{TRUE} (the default), some checks are performed to verify that the \code{loo} object was generated from the \code{brmsfit} object passed to argument \code{x}.} \item{...}{Further arguments passed to the underlying methods. Additional arguments initially passed to \code{\link{loo}}, for example, \code{newdata} or \code{resp}, need to be passed again to \code{loo_moment_match} in order for the latter to work correctly.} } \value{ An updated object of class \code{loo}. } \description{ Moment matching for efficient approximate leave-one-out cross-validation (LOO-CV). See \code{\link[loo:loo_moment_match]{loo_moment_match}} for more details. } \details{ The moment matching algorithm requires draws of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{loo_moment_match} cannot be computed. Thus, please set \code{save_pars = save_pars(all = TRUE)} in the call to \code{\link{brm}}, if you are planning to apply \code{loo_moment_match} to your models. } \examples{ \dontrun{ fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), save_pars = save_pars(all = TRUE)) # throws a warning about some Pareto k estimates being too high (loo1 <- loo(fit1)) (mmloo1 <- loo_moment_match(fit1, loo = loo1)) } } \references{ Paananen, T., Piironen, J., Buerkner, P.-C., Vehtari, A. (2021). Implicitly Adaptive Importance Sampling. Statistics and Computing.
} brms/man/loo.brmsfit.Rd0000644000175000017500000001030414111751667014656 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo.brmsfit} \alias{loo.brmsfit} \alias{loo} \alias{LOO} \alias{LOO.brmsfit} \title{Efficient approximate leave-one-out cross-validation (LOO)} \usage{ \method{loo}{brmsfit}( x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL ) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{compare}{A flag indicating if the information criteria of the models should be compared to each other via \code{\link{loo_compare}}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{pointwise}{A flag indicating whether to compute the full log-likelihood matrix at once or separately for each observation. The latter approach is usually considerably slower but requires much less working memory. Accordingly, if one runs into memory issues, \code{pointwise = TRUE} is the way to go.} \item{moment_match}{Logical; Indicate whether \code{\link{loo_moment_match}} should be applied on problematic observations. Defaults to \code{FALSE}. For most models, moment matching will only work if you have set \code{save_pars = save_pars(all = TRUE)} when fitting the model with \code{\link{brm}}. See \code{\link{loo_moment_match.brmsfit}} for more details.} \item{reloo}{Logical; Indicate whether \code{\link{reloo}} should be applied on problematic observations. Defaults to \code{FALSE}.} \item{k_threshold}{The threshold at which pareto \eqn{k} estimates are treated as problematic. Defaults to \code{0.7}. Only used if argument \code{reloo} is \code{TRUE}. See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details.} \item{save_psis}{Should the \code{"psis"} object created internally be saved in the returned object? For more details see \code{\link[loo:loo]{loo}}.} \item{moment_match_args}{Optional \code{list} of additional arguments passed to \code{\link{loo_moment_match}}.} \item{reloo_args}{Optional \code{list} of additional arguments passed to \code{\link{reloo}}.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ If just one object is provided, an object of class \code{loo}. If multiple objects are provided, an object of class \code{loolist}. } \description{ Perform approximate leave-one-out cross-validation based on the posterior likelihood using the \pkg{loo} package. For more details see \code{\link[loo:loo]{loo}}. } \details{ See \code{\link{loo_compare}} for details on model comparisons. For \code{brmsfit} objects, \code{LOO} is an alias of \code{loo}. Use method \code{\link{add_criterion}} to store information criteria in the fitted model object for later usage. 
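As a brief sketch (assuming a fitted model object \code{fit1} such as the one in the examples below), the LOO criterion can be stored and later retrieved without recomputation: \preformatted{
# compute and store the LOO criterion inside the brmsfit object
fit1 <- add_criterion(fit1, "loo")
# access the stored criterion later on
fit1$criteria$loo
}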
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) (loo1 <- loo(fit1)) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) (loo2 <- loo(fit2)) # compare both models loo_compare(loo1, loo2) } } \references{ Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. In Statistics and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. Gelman, A., Hwang, J., & Vehtari, A. (2014). Understanding predictive information criteria for Bayesian models. Statistics and Computing, 24, 997-1016. Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. The Journal of Machine Learning Research, 11, 3571-3594. } brms/man/posterior_interval.brmsfit.Rd0000644000175000017500000000243214111751667020022 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_interval.brmsfit} \alias{posterior_interval.brmsfit} \alias{posterior_interval} \title{Compute posterior uncertainty intervals} \usage{ \method{posterior_interval}{brmsfit}(object, pars = NA, variable = NULL, prob = 0.95, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{pars}{Deprecated alias of \code{variable}. For reasons of backwards compatibility, \code{pars} is interpreted as a vector of regular expressions by default unless \code{fixed = TRUE} is specified.} \item{variable}{A character vector providing the variables to extract. By default, all variables are extracted.} \item{prob}{A value between 0 and 1 indicating the desired probability to be covered by the uncertainty intervals. The default is 0.95.} \item{...}{More arguments passed to \code{\link{as.matrix.brmsfit}}.} } \value{ A \code{matrix} with lower and upper interval bounds as columns and as many rows as selected variables. } \description{ Compute posterior uncertainty intervals for \code{brmsfit} objects. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = negbinomial()) posterior_interval(fit) } } brms/man/posterior_table.Rd0000644000175000017500000000161714111751667015624 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{posterior_table} \alias{posterior_table} \title{Table Creation for Posterior Draws} \usage{ posterior_table(x, levels = NULL) } \arguments{ \item{x}{A matrix of posterior draws where rows indicate draws and columns indicate parameters.} \item{levels}{Optional values of possible posterior values. Defaults to all unique values in \code{x}.} } \value{ A matrix where rows indicate parameters and columns indicate the unique values of posterior draws. } \description{ Create a table for unique values of posterior draws. This is usually only useful when summarizing predictions of ordinal models. 
} \examples{ \dontrun{ fit <- brm(rating ~ period + carry + treat, data = inhaler, family = cumulative()) pr <- predict(fit, summary = FALSE) posterior_table(pr) } } brms/man/predict.brmsfit.Rd0000644000175000017500000001152314111751667015523 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_predict.R \name{predict.brmsfit} \alias{predict.brmsfit} \title{Draws from the Posterior Predictive Distribution} \usage{ \method{predict}{brmsfit}( object, newdata = NULL, re_formula = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{transform}{(Deprecated) A function or a character string naming a function to be applied on the predicted responses before summary statistics are computed.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{negative_rt}{Only relevant for Wiener diffusion models. A flag indicating whether response times of responses on the lower boundary should be returned as negative values. This allows to distinguish responses on the upper and lower boundary. Defaults to \code{FALSE}.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{ntrys}{Parameter used in rejection sampling for truncated discrete models only (defaults to \code{5}). See Details for more information.} \item{cores}{Number of cores (defaults to \code{1}). On non-Windows systems, this argument can be set globally via the \code{mc.cores} option.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ An \code{array} of predicted response values. If \code{summary = FALSE} the output resembles those of \code{\link{posterior_predict.brmsfit}}. 
If \code{summary = TRUE} the output depends on the family: For categorical and ordinal families, the output is an N x C matrix, where N is the number of observations, C is the number of categories, and the values are predicted category probabilities. For all other families, the output is a N x E matrix where E = \code{2 + length(probs)} is the number of summary statistics: The \code{Estimate} column contains point estimates (either mean or median depending on argument \code{robust}), while the \code{Est.Error} column contains uncertainty estimates (either standard deviation or median absolute deviation depending on argument \code{robust}). The remaining columns starting with \code{Q} contain quantile estimates as specified via argument \code{probs}. } \description{ This method is an alias of \code{\link{posterior_predict.brmsfit}} with additional arguments for obtaining summaries of the computed draws. } \examples{ \dontrun{ ## fit a model fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), data = kidney, family = "exponential", inits = "0") ## predicted responses pp <- predict(fit) head(pp) ## predicted responses excluding the group-level effect of age pp <- predict(fit, re_formula = ~ (1 | patient)) head(pp) ## predicted responses of patient 1 for new data newdata <- data.frame( sex = factor(c("male", "female")), age = c(20, 50), patient = c(1, 1) ) predict(fit, newdata = newdata) } } \seealso{ \code{\link{posterior_predict.brmsfit}} } brms/man/custom_family.Rd0000644000175000017500000001457614111751667015312 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/families.R \name{custom_family} \alias{custom_family} \alias{customfamily} \title{Custom Families in \pkg{brms} Models} \usage{ custom_family( name, dpars = "mu", links = "identity", type = c("real", "int"), lb = NA, ub = NA, vars = NULL, loop = TRUE, specials = NULL, threshold = "flexible", log_lik = NULL, posterior_predict = NULL, posterior_epred = NULL, predict = NULL, fitted = NULL, env = parent.frame() ) } \arguments{ \item{name}{Name of the custom family.} \item{dpars}{Names of the distributional parameters of the family. One parameter must be named \code{"mu"} and the main formula of the model will correspond to that parameter.} \item{links}{Names of the link functions of the distributional parameters.} \item{type}{Indicates if the response distribution is continuous (\code{"real"}) or discrete (\code{"int"}). This controls if the corresponding density function will be named with \code{_lpdf} or \code{_lpmf}.} \item{lb}{Vector of lower bounds of the distributional parameters. Defaults to \code{NA} that is no lower bound.} \item{ub}{Vector of upper bounds of the distributional parameters. Defaults to \code{NA} that is no upper bound.} \item{vars}{Names of variables that are part of the likelihood function without being distributional parameters. That is, \code{vars} can be used to pass data to the likelihood. Such arguments will be added to the list of function arguments at the end, after the distributional parameters. See \code{\link{stanvar}} for details about adding self-defined data to the generated \pkg{Stan} model. Addition arguments \code{vreal} and \code{vint} may be used for this purpose as well (see Examples below). See also \code{\link{brmsformula}} and \code{\link{addition-terms}} for more details.} \item{loop}{Logical; Should the likelihood be evaluated via a loop (\code{TRUE}; the default) over observations in Stan? 
If \code{FALSE}, the Stan code will be written in a vectorized manner over observations if possible.} \item{specials}{A character vector of special options to enable for this custom family. Currently for internal use only.} \item{threshold}{Optional threshold type for custom ordinal families. Ignored for non-ordinal families.} \item{log_lik}{Optional function to compute log-likelihood values of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:log_lik.brmsfit]{log_lik}}.} \item{posterior_predict}{Optional function to compute posterior predictions of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}.} \item{posterior_epred}{Optional function to compute expected values of the posterior predictive distribution of the model in \R. This is only relevant if one wants to ensure compatibility with method \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}.} \item{predict}{Deprecated alias of `posterior_predict`.} \item{fitted}{Deprecated alias of `posterior_epred`.} \item{env}{An \code{\link{environment}} in which certain post-processing functions related to the custom family can be found, if they were not directly passed to \code{custom_family}. This is only relevant if one wants to ensure compatibility with the methods \code{\link[brms:log_lik.brmsfit]{log_lik}}, \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}, or \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}. By default, \code{env} is the environment from which \code{custom_family} is called.} } \value{ An object of class \code{customfamily} inheriting from class \code{\link{brmsfamily}}. } \description{ Define custom families (i.e., response distributions) for use in \pkg{brms} models. It allows users to benefit from the modeling flexibility of \pkg{brms}, while applying their self-defined likelihood functions. All of the post-processing methods for \code{brmsfit} objects can be made compatible with custom families. See \code{vignette("brms_customfamilies")} for more details. For a list of built-in families see \code{\link{brmsfamily}}. } \details{ The corresponding probability density or mass \code{Stan} functions need to have the same name as the custom family. That is, if a family is called \code{myfamily}, then the \pkg{Stan} functions should be called \code{myfamily_lpdf} or \code{myfamily_lpmf} depending on whether it defines a continuous or discrete distribution.
} \examples{ \dontrun{ ## demonstrate how to fit a beta-binomial model ## generate some fake data phi <- 0.7 n <- 300 z <- rnorm(n, sd = 0.2) ntrials <- sample(1:10, n, replace = TRUE) eta <- 1 + z mu <- exp(eta) / (1 + exp(eta)) a <- mu * phi b <- (1 - mu) * phi p <- rbeta(n, a, b) y <- rbinom(n, ntrials, p) dat <- data.frame(y, z, ntrials) # define a custom family beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "vint1[n]" ) # define the corresponding Stan density function stan_density <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_density, block = "functions") # fit the model fit <- brm(y | vint(ntrials) ~ z, data = dat, family = beta_binomial2, stanvars = stanvars) summary(fit) # define a *vectorized* custom family (no loop over observations) # notice also that 'vint' no longer has an observation index beta_binomial2_vec <- custom_family( "beta_binomial2", dpars = c("mu", "phi"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "vint1", loop = FALSE ) # define the corresponding Stan density function stan_density_vec <- " real beta_binomial2_lpmf(int[] y, vector mu, real phi, int[] N) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } " stanvars_vec <- stanvar(scode = stan_density_vec, block = "functions") # fit the model fit_vec <- brm(y | vint(ntrials) ~ z, data = dat, family = beta_binomial2_vec, stanvars = stanvars_vec) summary(fit_vec) } } \seealso{ \code{\link{brmsfamily}}, \code{\link{brmsformula}}, \code{\link{stanvar}} } brms/man/parnames.Rd0000644000175000017500000000073013701270367014225 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{parnames} \alias{parnames} \alias{parnames.brmsfit} \title{Extract Parameter Names} \usage{ parnames(x, ...) } \arguments{ \item{x}{An \R object} \item{...}{Further arguments passed to or from other methods.} } \value{ A character vector containing the parameter names of the model. } \description{ Extract all parameter names of a given model. } brms/man/car.Rd0000644000175000017500000000470614105230573013165 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{car} \alias{car} \title{Spatial conditional autoregressive (CAR) structures} \usage{ car(M, gr = NA, type = "escar") } \arguments{ \item{M}{Adjacency matrix of locations. All non-zero entries are treated as if the two locations are adjacent. If \code{gr} is specified, the row names of \code{M} have to match the levels of the grouping factor.} \item{gr}{An optional grouping factor mapping observations to spatial locations. If not specified, each observation is treated as a separate location. It is recommended to always specify a grouping factor to allow for handling of new data in post-processing methods.} \item{type}{Type of the CAR structure. Currently implemented are \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. More information is provided in the 'Details' section.} } \value{ An object of class \code{'car_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an spatial conditional autoregressive (CAR) term in \pkg{brms}. 
The function does not evaluate its arguments -- it exists purely to help set up a model with CAR terms. } \details{ The \code{escar} and \code{esicar} types are implemented based on the case study of Max Joseph (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and \code{bym2} type is implemented based on the case study of Mitzi Morris (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). } \examples{ \dontrun{ # generate some spatial data east <- north <- 1:10 Grid <- expand.grid(east, north) K <- nrow(Grid) # set up distance and neighbourhood matrices distance <- as.matrix(dist(Grid)) W <- array(0, c(K, K)) W[distance == 1] <- 1 # generate the covariates and response data x1 <- rnorm(K) x2 <- rnorm(K) theta <- rnorm(K, sd = 0.05) phi <- rmulti_normal( 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) ) eta <- x1 + x2 + phi prob <- exp(eta) / (1 + exp(eta)) size <- rep(50, K) y <- rbinom(n = K, size = size, prob = prob) dat <- data.frame(y, size, x1, x2) # fit a CAR model fit <- brm(y | trials(size) ~ x1 + x2 + car(W), data = dat, data2 = list(W = W), family = binomial()) summary(fit) } } \seealso{ \code{\link{autocor-terms}} } brms/man/pp_mixture.brmsfit.Rd0000644000175000017500000000771114111751667016271 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pp_mixture.R \name{pp_mixture.brmsfit} \alias{pp_mixture.brmsfit} \alias{pp_mixture} \title{Posterior Probabilities of Mixture Component Memberships} \usage{ \method{pp_mixture}{brmsfit}( x, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, log = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) pp_mixture(x, ...) } \arguments{ \item{x}{An \R object usually of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{log}{Logical; Indicates whether to return probabilities on the log-scale.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. 
Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \value{ If \code{summary = TRUE}, an N x E x K array, where N is the number of observations, K is the number of mixture components, and E is equal to \code{length(probs) + 2}. If \code{summary = FALSE}, an S x N x K array, where S is the number of posterior draws. } \description{ Compute the posterior probabilities of mixture component memberships for each observation including uncertainty estimates. } \details{ The returned probabilities can be written as \eqn{P(Kn = k | Yn)}, that is the posterior probability that observation n originates from component k. They are computed using Bayes' Theorem \deqn{P(Kn = k | Yn) = P(Yn | Kn = k) P(Kn = k) / P(Yn),} where \eqn{P(Yn | Kn = k)} is the (posterior) likelihood of observation n for component k, \eqn{P(Kn = k)} is the (posterior) mixing probability of component k (i.e. parameter \code{theta}), and \deqn{P(Yn) = \sum (k=1,...,K) P(Yn | Kn = k) P(Kn = k)} is a normalizing constant. } \examples{ \dontrun{ ## simulate some data set.seed(1234) dat <- data.frame( y = c(rnorm(100), rnorm(50, 2)), x = rnorm(150) ) ## fit a simple normal mixture model mix <- mixture(gaussian, nmix = 2) prior <- c( prior(normal(0, 5), Intercept, nlpar = mu1), prior(normal(0, 5), Intercept, nlpar = mu2), prior(dirichlet(2, 2), theta) ) fit1 <- brm(bf(y ~ x), dat, family = mix, prior = prior, chains = 2, inits = 0) summary(fit1) ## compute the membership probabilities ppm <- pp_mixture(fit1) str(ppm) ## extract point estimates for each observation head(ppm[, 1, ]) ## classify every observation according to ## the most likely component apply(ppm[, 1, ], 1, which.max) } } brms/man/is.cor_brms.Rd0000644000175000017500000000103613701270367014637 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{is.cor_brms} \alias{is.cor_brms} \alias{is.cor_arma} \alias{is.cor_cosy} \alias{is.cor_sar} \alias{is.cor_car} \alias{is.cor_fixed} \title{Check if argument is a correlation structure} \usage{ is.cor_brms(x) is.cor_arma(x) is.cor_cosy(x) is.cor_sar(x) is.cor_car(x) is.cor_fixed(x) } \arguments{ \item{x}{An \R object.} } \description{ Check if argument is one of the correlation structures used in \pkg{brms}. } brms/man/mvbind.Rd0000644000175000017500000000102713625767110013700 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{mvbind} \alias{mvbind} \title{Bind response variables in multivariate models} \usage{ mvbind(...) } \arguments{ \item{...}{Same as in \code{\link{cbind}}} } \description{ Can be used to specify a multivariate \pkg{brms} model within a single formula. Outside of \code{\link{brmsformula}}, it just behaves like \code{\link{cbind}}. } \examples{ bf(mvbind(y1, y2) ~ x) } \seealso{ \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} } brms/man/cosy.Rd0000644000175000017500000000201713701270367013374 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{cosy} \alias{cosy} \title{Set up COSY correlation structures} \usage{ cosy(time = NA, gr = NA) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. 
If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} } \value{ An object of class \code{'cosy_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a compound symmetry (COSY) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with COSY terms. } \examples{ \dontrun{ data("lh") lh <- as.data.frame(lh) fit <- brm(x ~ cosy(), data = lh) summary(fit) } } \seealso{ \code{\link{autocor-terms}} } brms/man/hypothesis.brmsfit.Rd0000644000175000017500000001551114111751667016271 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{hypothesis.brmsfit} \alias{hypothesis.brmsfit} \alias{hypothesis} \alias{hypothesis.default} \title{Non-Linear Hypothesis Testing} \usage{ \method{hypothesis}{brmsfit}( x, hypothesis, class = "b", group = "", scope = c("standard", "ranef", "coef"), alpha = 0.05, robust = FALSE, seed = NULL, ... ) hypothesis(x, ...) \method{hypothesis}{default}(x, hypothesis, alpha = 0.05, robust = FALSE, ...) } \arguments{ \item{x}{An \code{R} object. If it is not a \code{brmsfit} object, it must be coercible to a \code{data.frame}. In the latter case, the variables used in the \code{hypothesis} argument need to correspond to column names of \code{x}, while the rows are treated as representing posterior draws of the variables.} \item{hypothesis}{A character vector specifying one or more non-linear hypotheses concerning parameters of the model.} \item{class}{A string specifying the class of parameters being tested. Default is "b" for population-level effects. Other typical options are "sd" or "cor". If \code{class = NULL}, all parameters can be tested against each other, but have to be specified with their full name (see also \code{\link[brms:draws-index-brms]{variables}}).} \item{group}{Name of a grouping factor to evaluate only group-level effect parameters related to this grouping factor.} \item{scope}{Indicates where to look for the variables specified in \code{hypothesis}. If \code{"standard"}, use the full parameter names (subject to the restriction given by \code{class} and \code{group}). If \code{"coef"} or \code{"ranef"}, compute the hypothesis for all levels of the grouping factor given in \code{"group"}, based on the output of \code{\link{coef.brmsfit}} and \code{\link{ranef.brmsfit}}, respectively.} \item{alpha}{The alpha-level of the tests (default is 0.05; see 'Details' for more information).} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} \item{...}{Currently ignored.} } \value{ A \code{\link{brmshypothesis}} object. } \description{ Perform non-linear hypothesis testing for all model parameters. } \details{ Among others, \code{hypothesis} computes an evidence ratio (\code{Evid.Ratio}) for each hypothesis. For a one-sided hypothesis, this is just the posterior probability (\code{Post.Prob}) under the hypothesis against its alternative. That is, when the hypothesis is of the form \code{a > b}, the evidence ratio is the ratio of the posterior probability of \code{a > b} and the posterior probability of \code{a < b}.
In this example, values greater than one indicate that the evidence in favor of \code{a > b} is larger than the evidence in favor of \code{a < b}. For a two-sided (point) hypothesis, the evidence ratio is a Bayes factor between the hypothesis and its alternative computed via the Savage-Dickey density ratio method. That is, the posterior density at the point of interest divided by the prior density at that point. Values greater than one indicate that evidence in favor of the point hypothesis has increased after seeing the data. In order to calculate this Bayes factor, all parameters related to the hypothesis must have proper priors and argument \code{sample_prior} of function \code{brm} must be set to \code{"yes"}. Otherwise \code{Evid.Ratio} (and \code{Post.Prob}) will be \code{NA}. Please note that, for technical reasons, we cannot sample from priors of certain parameter classes. Most notably, these include overall intercept parameters (prior class \code{"Intercept"}) as well as group-level coefficients. When interpreting Bayes factors, make sure that your priors are reasonable and carefully chosen, as the result will depend heavily on the priors. In particular, avoid using default priors. The \code{Evid.Ratio} may sometimes be \code{0} or \code{Inf}, implying very small or large evidence, respectively, in favor of the tested hypothesis. For one-sided hypothesis pairs, this basically means that all posterior draws are on the same side of the value dividing the two hypotheses. In that sense, instead of \code{0} or \code{Inf}, you may rather read it as \code{Evid.Ratio} smaller than \code{1 / S} or greater than \code{S}, respectively, where \code{S} denotes the number of posterior draws used in the computations. The argument \code{alpha} specifies the size of the credible interval (i.e., Bayesian confidence interval). For instance, if we tested a two-sided hypothesis and set \code{alpha = 0.05} (5\%), the credible interval will contain \code{1 - alpha = 0.95} (95\%) of the posterior values. Hence, \code{alpha * 100}\% of the posterior values will lie outside of the credible interval. Although this allows testing of hypotheses in a manner similar to the frequentist null-hypothesis testing framework, we strongly argue against using arbitrary cutoffs (e.g., \code{p < .05}) to determine the 'existence' of an effect.
} \examples{ \dontrun{ ## define priors prior <- c(set_prior("normal(0,2)", class = "b"), set_prior("student_t(10,0,1)", class = "sigma"), set_prior("student_t(10,0,1)", class = "sd")) ## fit a linear mixed effects models fit <- brm(time ~ age + sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = prior, sample_prior = "yes", control = list(adapt_delta = 0.95)) ## perform two-sided hypothesis testing (hyp1 <- hypothesis(fit, "sexfemale = age + diseasePKD")) plot(hyp1) hypothesis(fit, "exp(age) - 3 = 0", alpha = 0.01) ## perform one-sided hypothesis testing hypothesis(fit, "diseasePKD + diseaseGN - 3 < 0") hypothesis(fit, "age < Intercept", class = "sd", group = "patient") ## test the amount of random intercept variance on all variance h <- paste("sd_patient__Intercept^2 / (sd_patient__Intercept^2 +", "sd_patient__age^2 + sigma^2) = 0") (hyp2 <- hypothesis(fit, h, class = NULL)) plot(hyp2) ## test more than one hypothesis at once h <- c("diseaseGN = diseaseAN", "2 * diseaseGN - diseasePKD = 0") (hyp3 <- hypothesis(fit, h)) plot(hyp3, ignore_prior = TRUE) ## compute hypotheses for all levels of a grouping factor hypothesis(fit, "age = 0", scope = "coef", group = "patient") ## use the default method dat <- as.data.frame(fit) str(dat) hypothesis(dat, "b_age > 0") } } \seealso{ \code{\link{brmshypothesis}} } \author{ Paul-Christian Buerkner \email{paul.buerkner@gmail.com} } brms/man/family.brmsfit.Rd0000644000175000017500000000116113701270367015344 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{family.brmsfit} \alias{family.brmsfit} \title{Extract Model Family Objects} \usage{ \method{family}{brmsfit}(object, resp = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{...}{Currently unused.} } \value{ A \code{brmsfamily} object or a list of such objects for multivariate models. } \description{ Extract Model Family Objects } brms/man/prior_summary.brmsfit.Rd0000644000175000017500000000217614111751667017005 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{prior_summary.brmsfit} \alias{prior_summary.brmsfit} \alias{prior_summary} \title{Extract Priors of a Bayesian Model Fitted with \pkg{brms}} \usage{ \method{prior_summary}{brmsfit}(object, all = TRUE, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{all}{Logical; Show all parameters in the model which may have priors (\code{TRUE}) or only those with proper priors (\code{FALSE})?} \item{...}{Further arguments passed to or from other methods.} } \value{ For \code{brmsfit} objects, an object of class \code{brmsprior}. 
} \description{ Extract Priors of a Bayesian Model Fitted with \pkg{brms} } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = c(prior(student_t(5,0,10), class = b), prior(cauchy(0,2), class = sd))) prior_summary(fit) prior_summary(fit, all = FALSE) print(prior_summary(fit, all = FALSE), show_df = FALSE) } } brms/man/posterior_smooths.brmsfit.Rd0000644000175000017500000000411614111751667017673 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_smooths.R \name{posterior_smooths.brmsfit} \alias{posterior_smooths.brmsfit} \alias{posterior_smooths} \title{Posterior Predictions of Smooth Terms} \usage{ \method{posterior_smooths}{brmsfit}( object, smooth, newdata = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, ... ) posterior_smooths(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{smooth}{Name of a single smooth term for which predictions should be computed.} \item{newdata}{An optional \code{data.frame} for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. Only those variables appearing in the chosen \code{smooth} term are required.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Optional name of a predicted distributional parameter. If specified, expected predictions of this parameter are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameter are returned.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{...}{Currently ignored.} } \value{ An S x N matrix, where S is the number of posterior draws and N is the number of observations. } \description{ Compute posterior predictions of smooth \code{s} and \code{t2} terms of models fitted with \pkg{brms}. } \examples{ \dontrun{ set.seed(0) dat <- mgcv::gamSim(1, n = 200, scale = 2) fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) summary(fit) newdata <- data.frame(x2 = seq(0, 1, length.out = 10)) str(posterior_smooths(fit, smooth = "s(x2)", newdata = newdata)) } } brms/man/pp_check.brmsfit.Rd0000644000175000017500000000602514111751667015646 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pp_check.R \name{pp_check.brmsfit} \alias{pp_check.brmsfit} \alias{pp_check} \title{Posterior Predictive Checks for \code{brmsfit} Objects} \usage{ \method{pp_check}{brmsfit}( object, type, ndraws = NULL, nsamples = NULL, group = NULL, x = NULL, newdata = NULL, resp = NULL, draw_ids = NULL, subset = NULL, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{type}{Type of the ppc plot as given by a character string. See \code{\link[bayesplot:PPC-overview]{PPC}} for an overview of currently supported types. You may also use an invalid type (e.g. \code{type = "xyz"}) to get a list of supported types in the resulting error message.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} all draws are used. If not specified, the number of posterior draws is chosen automatically.
Ignored if \code{draw_ids} is not \code{NULL}.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{group}{Optional name of a factor variable in the model by which to stratify the ppc plot. This argument is required for ppc \code{*_grouped} types and ignored otherwise.} \item{x}{Optional name of a variable in the model. Only used for ppc types having an \code{x} argument and ignored otherwise.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{...}{Further arguments passed to \code{\link{predict.brmsfit}} as well as to the PPC function specified in \code{type}.} } \value{ A ggplot object that can be further customized using the \pkg{ggplot2} package. } \description{ Perform posterior predictive checks with the help of the \pkg{bayesplot} package. } \details{ For a detailed explanation of each of the ppc functions, see the \code{\link[bayesplot:PPC-overview]{PPC}} documentation of the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson()) pp_check(fit) # shows dens_overlay plot by default pp_check(fit, type = "error_hist", ndraws = 11) pp_check(fit, type = "scatter_avg", ndraws = 100) pp_check(fit, type = "stat_2d") pp_check(fit, type = "rootogram") pp_check(fit, type = "loo_pit") ## get an overview of all valid types pp_check(fit, type = "xyz") } } brms/man/GenExtremeValue.Rd0000644000175000017500000000236414111751667015467 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{GenExtremeValue} \alias{GenExtremeValue} \alias{dgen_extreme_value} \alias{pgen_extreme_value} \alias{rgen_extreme_value} \title{The Generalized Extreme Value Distribution} \usage{ dgen_extreme_value(x, mu = 0, sigma = 1, xi = 0, log = FALSE) pgen_extreme_value( q, mu = 0, sigma = 1, xi = 0, lower.tail = TRUE, log.p = FALSE ) rgen_extreme_value(n, mu = 0, sigma = 1, xi = 0) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{sigma}{Vector of scales.} \item{xi}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the generalized extreme value distribution with location \code{mu}, scale \code{sigma} and shape \code{xi}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. 
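A small usage sketch (the parameter values below are arbitrary and purely illustrative): \preformatted{
library(brms)
# density, distribution function, and random draws of the GEV distribution
dgen_extreme_value(c(1, 2, 3), mu = 0, sigma = 1, xi = 0.2)
pgen_extreme_value(2, mu = 0, sigma = 1, xi = 0.2)
rgen_extreme_value(5, mu = 0, sigma = 1, xi = 0.2)
}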
} brms/man/loo_compare.brmsfit.Rd0000644000175000017500000000257013701270367016367 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{loo_compare.brmsfit} \alias{loo_compare.brmsfit} \alias{loo_compare} \title{Model comparison with the \pkg{loo} package} \usage{ \method{loo_compare}{brmsfit}(x, ..., criterion = c("loo", "waic", "kfold"), model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects.} \item{criterion}{The name of the criterion to be extracted from \code{brmsfit} objects.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \value{ An object of class "\code{compare.loo}". } \description{ For more details see \code{\link[loo:loo_compare]{loo_compare}}. } \details{ All \code{brmsfit} objects should contain precomputed criterion objects. See \code{\link{add_criterion}} for more help. } \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) fit1 <- add_criterion(fit1, "waic") # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) fit2 <- add_criterion(fit2, "waic") # compare both models loo_compare(fit1, fit2, criterion = "waic") } } brms/man/brmsformula.Rd0000644000175000017500000010542114111751667014756 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{brmsformula} \alias{brmsformula} \alias{bf} \title{Set up a model formula for use in \pkg{brms}} \usage{ brmsformula( formula, ..., flist = NULL, family = NULL, autocor = NULL, nl = NULL, loop = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL, unused = NULL ) } \arguments{ \item{formula}{An object of class \code{formula} (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given in 'Details'.} \item{...}{Additional \code{formula} objects to specify predictors of non-linear and distributional parameters. Formulas can either be named directly or contain names on their left-hand side. Alternatively, it is possible to fix parameters to certain values by passing numbers or character strings in which case arguments have to be named to provide the parameter names. See 'Details' for more information.} \item{flist}{Optional list of formulas, which are treated in the same way as formulas passed via the \code{...} argument.} \item{family}{Same argument as in \code{\link{brm}}. If \code{family} is specified in \code{brmsformula}, it will overwrite the value specified in other functions.} \item{autocor}{An optional \code{formula} which contains autocorrelation terms as described in \code{\link{autocor-terms}} or alternatively a \code{\link{cor_brms}} object (deprecated). If \code{autocor} is specified in \code{brmsformula}, it will overwrite the value specified in other functions.} \item{nl}{Logical; Indicates whether \code{formula} should be treated as specifying a non-linear model. By default, \code{formula} is treated as an ordinary linear model formula.} \item{loop}{Logical; Only used in non-linear models. Indicates if the computation of the non-linear formula should be done inside (\code{TRUE}) or outside (\code{FALSE}) a loop over observations. 
Defaults to \code{TRUE}.} \item{center}{Logical; Indicates if the population-level design matrix should be centered, which usually increases sampling efficiency. See the 'Details' section for more information. Defaults to \code{TRUE} for distributional parameters and to \code{FALSE} for non-linear parameters.} \item{cmc}{Logical; Indicates whether automatic cell-mean coding should be enabled when removing the intercept by adding \code{0} to the right-hand side of model formulas. Defaults to \code{TRUE} to mirror the behavior of standard \R formula parsing.} \item{sparse}{Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved and may even be slightly decreased.} \item{decomp}{Optional name of the decomposition used for the population-level design matrix. Defaults to \code{NULL}, that is, no decomposition. The other option currently available is \code{"QR"} for the QR decomposition, which helps in fitting models with highly correlated predictors.} \item{unused}{An optional \code{formula} which contains variables that are unused in the model but should still be stored in the model's data frame. This can be useful, for example, if those variables are required for post-processing the model.} } \value{ An object of class \code{brmsformula}, which is essentially a \code{list} containing all model formulas as well as some additional information. } \description{ Set up a model formula for use in the \pkg{brms} package allowing to define (potentially non-linear) additive multilevel models for all parameters of the assumed response distribution. } \details{ \bold{General formula structure} The \code{formula} argument accepts formulas of the following syntax: \code{response | aterms ~ pterms + (gterms | group)} The \code{pterms} part contains effects that are assumed to be the same across observations. We call them 'population-level' or 'overall' effects, or (adopting frequentist vocabulary) 'fixed' effects. The optional \code{gterms} part may contain effects that are assumed to vary across grouping variables specified in \code{group}. We call them 'group-level' or 'varying' effects, or (adopting frequentist vocabulary) 'random' effects, although the latter name is misleading in a Bayesian context. For more details type \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")}. \bold{Group-level terms} Multiple grouping factors, each with multiple group-level effects, are possible. (Of course we can also run models without any group-level effects.) Instead of \code{|} you may use \code{||} in grouping terms to prevent correlations from being modeled. Equivalently, the \code{cor} argument of the \code{\link{gr}} function can be used for this purpose, for example, \code{(1 + x || g)} is equivalent to \code{(1 + x | gr(g, cor = FALSE))}. It is also possible to model different group-level terms of the same grouping factor as correlated (even across different formulas, e.g., in non-linear models) by using \code{|<ID>|} instead of \code{|}. All group-level terms sharing the same ID will be modeled as correlated. If, for instance, one specifies the terms \code{(1+x|i|g)} and \code{(1+z|i|g)} somewhere in the formulas passed to \code{brmsformula}, correlations between the corresponding group-level effects will be estimated.
In the above example, \code{i} is not a variable in the data but just a symbol to indicate correlations between multiple group-level terms. Equivalently, the \code{id} argument of the \code{\link{gr}} function can be used as well, for example, \code{(1 + x | gr(g, id = "i"))}. If levels of the grouping factor belong to different sub-populations, it may be reasonable to assume a different covariance matrix for each of the sub-populations. For instance, the variation within the treatment group and within the control group in a randomized control trial might differ. Suppose that \code{y} is the outcome, and \code{x} is the factor indicating the treatment and control group. Then, we could estimate different hyper-parameters of the varying effects (in this case a varying intercept) for treatment and control group via \code{y ~ x + (1 | gr(subject, by = x))}. You can specify multi-membership terms using the \code{\link{mm}} function. For instance, a multi-membership term with two members could be \code{(1 | mm(g1, g2))}, where \code{g1} and \code{g2} specify the first and second member, respectively. Moreover, if a covariate \code{x} varies across the levels of the grouping factors \code{g1} and \code{g2}, we can save the respective covariate values in the variables \code{x1} and \code{x2} and then model the varying effect as \code{(1 + mmc(x1, x2) | mm(g1, g2))}. \bold{Special predictor terms} Flexible non-linear smooth terms can be modeled using the \code{\link{s}} and \code{\link{t2}} functions in the \code{pterms} part of the model formula. This allows fitting generalized additive mixed models (GAMMs) with \pkg{brms}. The implementation is similar to that used in the \pkg{gamm4} package. For more details on this model class see \code{\link[mgcv:gam]{gam}} and \code{\link[mgcv:gamm]{gamm}}. Gaussian process terms can be fitted using the \code{\link{gp}} function in the \code{pterms} part of the model formula. Similar to smooth terms, Gaussian processes can be used to model complex non-linear relationships, for instance temporal or spatial autocorrelation. However, they are computationally demanding and are thus not recommended for very large datasets unless approximations are used. The \code{pterms} and \code{gterms} parts may contain four non-standard effect types, namely monotonic, measurement error, missing value, and category specific effects, which can be specified using terms of the form \code{mo(predictor)}, \code{me(predictor, sd_predictor)}, \code{mi(predictor)}, and \code{cs()}, respectively. Category specific effects can only be estimated in ordinal models and are explained in more detail in the package's main vignette (type \code{vignette("brms_overview")}). The other three effect types are explained in the following. A monotonic predictor must either be integer valued or an ordered factor, which is the first difference from an ordinary continuous predictor. More importantly, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter takes care of the direction and size of the effect similar to an ordinary regression parameter, while an additional parameter vector estimates the normalized distances between consecutive predictor categories.
A main application of monotonic effects is ordinal predictors, which can this way be modeled without (falsely) treating them as continuous or as unordered categorical predictors. For more details and examples see \code{vignette("brms_monotonic")}. Quite often, predictors are measured and as such naturally contain measurement error. Although most researchers are well aware of this problem, measurement error in predictors is ignored in most regression analyses, possibly because only a few packages allow for modeling it. Notably, measurement error can be handled in structural equation models, but many more general regression models (such as those featured by \pkg{brms}) cannot be transferred to the SEM framework. In \pkg{brms}, effects of noise-free predictors can be modeled using the \code{me} (for 'measurement error') function. If, say, \code{y} is the response variable and \code{x} is a measured predictor with known measurement error \code{sdx}, we can simply include it on the right-hand side of the model formula via \code{y ~ me(x, sdx)}. This can easily be extended to more general formulas. If \code{x2} is another measured predictor with corresponding error \code{sdx2} and \code{z} is a predictor without error (e.g., an experimental setting), we can model all main effects and interactions of the three predictors in the well known manner: \code{y ~ me(x, sdx) * me(x2, sdx2) * z}. The \code{me} function is soft deprecated in favor of the more flexible and consistent \code{mi} function (see below). When a variable contains missing values, the corresponding rows will be excluded from the data by default (row-wise exclusion). However, quite often we want to keep these rows and instead estimate the missing values. There are two approaches for this: (a) Impute missing values before the model fitting, for instance via multiple imputation (see \code{\link{brm_multiple}} for a way to handle multiple imputed datasets). (b) Impute missing values on the fly during model fitting. The latter approach is explained in the following. Using a variable with missing values as a predictor requires two things. First, we need to specify that the predictor contains missings that should be imputed. If, say, \code{y} is the primary response, \code{x} is a predictor with missings and \code{z} is a predictor without missings, we go for \code{y ~ mi(x) + z}. Second, we need to model \code{x} as an additional response with corresponding predictors and the addition term \code{mi()}. In our example, we could write \code{x | mi() ~ z}. Measurement error may be included via the \code{sdy} argument, say, \code{x | mi(sdy = se) ~ z}. See \code{\link{mi}} for examples with real data. \bold{Autocorrelation terms} Autocorrelation terms can be directly specified inside the \code{pterms} part as well. Details can be found in \code{\link{autocor-terms}}. \bold{Additional response information} Another special feature of the \pkg{brms} formula syntax is the optional \code{aterms} part, which may contain multiple terms of the form \code{fun()} separated by \code{+}, each providing special information on the response variable. \code{fun} can be replaced with either \code{se}, \code{weights}, \code{subset}, \code{cens}, \code{trunc}, \code{trials}, \code{cat}, \code{dec}, \code{rate}, \code{vreal}, or \code{vint}. Their meanings are explained below (see also \code{\link{addition-terms}}).
For families \code{gaussian}, \code{student} and \code{skew_normal}, it is possible to specify standard errors of the observations, thus allowing to perform meta-analysis. Suppose that the variable \code{yi} contains the effect sizes from the studies and \code{sei} the corresponding standard errors. Then, fixed and random effects meta-analyses can be conducted using the formulas \code{yi | se(sei) ~ 1} and \code{yi | se(sei) ~ 1 + (1|study)}, respectively, where \code{study} is a variable uniquely identifying every study. If desired, meta-regression can be performed via \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1|study)} or \cr \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1 + mod1 + mod2|study)}, where \code{mod1} and \code{mod2} represent moderator variables. By default, the standard errors replace the parameter \code{sigma}. To model \code{sigma} in addition to the known standard errors, set argument \code{sigma} in function \code{se} to \code{TRUE}, for instance, \code{yi | se(sei, sigma = TRUE) ~ 1}. For all families, weighted regression may be performed using \code{weights} in the \code{aterms} part. Internally, this is implemented by multiplying the log-posterior values of each observation by their corresponding weights. Suppose that variable \code{wei} contains the weights and that \code{yi} is the response variable. Then, formula \code{yi | weights(wei) ~ predictors} implements a weighted regression. For multivariate models, \code{subset} may be used in the \code{aterms} part, to use different subsets of the data in different univariate models. For instance, if \code{sub} is a logical variable and \code{y} is the response of one of the univariate models, we may write \code{y | subset(sub) ~ predictors} so that \code{y} is predicted only for those observations for which \code{sub} evaluates to \code{TRUE}. For log-linear models such as poisson models, \code{rate} may be used in the \code{aterms} part to specify the denominator of a response that is expressed as a rate. The numerator is given by the actual response variable and has a distribution according to the family as usual. Using \code{rate(denom)} is equivalent to adding \code{offset(log(denom))} to the linear predictor of the main parameter but the former is arguably more convenient and explicit. With the exception of categorical and ordinal families, left, right, and interval censoring can be modeled through \code{y | cens(censored) ~ predictors}. The censoring variable (named \code{censored} in this example) should contain the values \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate that the corresponding observation is left censored, not censored, right censored, or interval censored. For interval censored data, a second variable (let's call it \code{y2}) has to be passed to \code{cens}. In this case, the formula has the structure \code{y | cens(censored, y2) ~ predictors}. While the lower bounds are given in \code{y}, the upper bounds are given in \code{y2} for interval censored data. Intervals are assumed to be open on the left and closed on the right: \code{(y, y2]}. With the exception of categorical and ordinal families, the response distribution can be truncated using the \code{trunc} function in the addition part. If the response variable is truncated between, say, 0 and 100, we can specify this via \code{yi | trunc(lb = 0, ub = 100) ~ predictors}. 
Instead of numbers, variables in the data set can also be passed, allowing for varying truncation points across observations. Defining only one of the two arguments in \code{trunc} leads to one-sided truncation. For all continuous families, missing values in the responses can be imputed within Stan by using the addition term \code{mi}. This is mostly useful in combination with \code{mi} predictor terms as explained above under 'Special predictor terms'. For families \code{binomial} and \code{zero_inflated_binomial}, addition should contain a variable indicating the number of trials underlying each observation. In \code{lme4} syntax, we may write for instance \code{cbind(success, n - success)}, which is equivalent to \code{success | trials(n)} in \pkg{brms} syntax. If the number of trials is constant across all observations, say \code{10}, we may also write \code{success | trials(10)}. \bold{Please note that the \code{cbind()} syntax will not work in \pkg{brms} in the expected way because this syntax is reserved for other purposes.} For all ordinal families, \code{aterms} may contain a term \code{thres(number)} to specify the number of thresholds (e.g., \code{thres(6)}), which should be equal to the total number of response categories - 1. If not given, the number of thresholds is calculated from the data. If different threshold vectors should be used for different subsets of the data, the \code{gr} argument can be used to provide the grouping variable (e.g., \code{thres(6, gr = item)}, if \code{item} is the grouping variable). In this case, the number of thresholds can also be a variable in the data with different values per group. A deprecated quasi alias of \code{thres()} is \code{cat()} with which the total number of response categories (i.e., number of thresholds + 1) can be specified. In Wiener diffusion models (family \code{wiener}), the addition term \code{dec} is mandatory to specify the (vector of) binary decisions corresponding to the reaction times. Non-zero values will be treated as a response on the upper boundary of the diffusion process and zeros will be treated as a response on the lower boundary. Alternatively, the variable passed to \code{dec} might also be a character vector consisting of \code{'lower'} and \code{'upper'}. All families support the \code{index} addition term to uniquely identify each observation of the corresponding response variable. Currently, \code{index} is primarily useful in combination with the \code{subset} addition and \code{\link{mi}} terms. For custom families, it is possible to pass an arbitrary number of real and integer vectors via the addition terms \code{vreal} and \code{vint}, respectively. An example is provided in \code{vignette('brms_customfamilies')}. To pass multiple vectors of the same data type, provide them separated by commas inside a single \code{vreal} or \code{vint} statement. Multiple addition terms of different types may be specified at the same time using the \code{+} operator. For example, the formula \code{formula = yi | se(sei) + cens(censored) ~ 1} implies a censored meta-analytic model. The addition argument \code{disp} (short for dispersion) has been removed in version 2.0. You may instead use the distributional regression approach by specifying \code{sigma ~ 1 + offset(log(xdisp))} or \code{shape ~ 1 + offset(log(xdisp))}, where \code{xdisp} is the variable previously passed to \code{disp}.
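To make some of these addition terms concrete, the following formulas sketch typical use cases; all variable names are purely illustrative: \preformatted{
# meta-analysis with known standard errors 'sei' and study identifier 'study'
bf(yi | se(sei) ~ 1 + (1 | study))
# weighted regression with weights stored in 'wei'
bf(yi | weights(wei) ~ x)
# binomial model with the number of trials stored in 'n'
bf(success | trials(n) ~ x)
# censored regression with the censoring indicator stored in 'censored'
bf(y | cens(censored) ~ x)
}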
\bold{Parameterization of the population-level intercept} By default, the population-level intercept (if incorporated) is estimated separately and not as part of the population-level parameter vector \code{b}. As a result, priors on the intercept also have to be specified separately. Furthermore, to increase sampling efficiency, the population-level design matrix \code{X} is centered around its column means \code{X_means} if the intercept is incorporated. This leads to a temporary bias in the intercept equal to \code{<X_means, b>}, where \code{<,>} is the scalar product. The bias is corrected after fitting the model, but be aware that you are effectively defining a prior on the intercept of the centered design matrix, not on the real intercept. You can turn off this special handling of the intercept by setting argument \code{center} to \code{FALSE}. For more details on setting priors on population-level intercepts, see \code{\link{set_prior}}. This behavior can be avoided by using the reserved (and internally generated) variable \code{Intercept}. Instead of \code{y ~ x}, you may write \code{y ~ 0 + Intercept + x}. This way, priors can be defined on the real intercept, directly. In addition, the intercept is just treated as an ordinary population-level effect and thus priors defined on \code{b} will also apply to it. Note that this parameterization may be less efficient than the default parameterization discussed above. \bold{Formula syntax for non-linear models} In \pkg{brms}, it is possible to specify non-linear models of arbitrary complexity. The non-linear model can just be specified within the \code{formula} argument. Suppose that we want to predict the response \code{y} through the predictor \code{x}, where \code{x} is linked to \code{y} through \code{y = alpha - beta * lambda^x}, with parameters \code{alpha}, \code{beta}, and \code{lambda}. This is certainly a non-linear model being defined via \code{formula = y ~ alpha - beta * lambda^x} (addition arguments can be added in the same way as for ordinary formulas). To tell \pkg{brms} that this is a non-linear model, we set argument \code{nl} to \code{TRUE}. Now we have to specify a model for each of the non-linear parameters. Let's say we just want to estimate those three parameters with no further covariates or random effects. Then we can pass \code{alpha + beta + lambda ~ 1} or equivalently (and more flexibly) \code{alpha ~ 1, beta ~ 1, lambda ~ 1} to the \code{...} argument. This can, of course, be extended. If we have another predictor \code{z} and observations nested within the grouping factor \code{g}, we may write for instance \code{alpha ~ 1, beta ~ 1 + z + (1|g), lambda ~ 1}. The formula syntax described above applies here as well. In this example, we are using \code{z} and \code{g} only for the prediction of \code{beta}, but we might also use them for the other non-linear parameters (provided that the resulting model is still scientifically reasonable). By default, non-linear covariates are treated as real vectors in Stan. However, if the data of the covariates is of type \code{integer} in \R (which can be enforced by the \code{as.integer} function), the Stan type will be changed to an integer array. That way, covariates can also be used for indexing purposes in Stan. Non-linear models may not be uniquely identified and/or show bad convergence. For this reason it is mandatory to specify priors on the non-linear parameters. For instructions on how to do that, see \code{\link{set_prior}}.
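A minimal sketch of such a non-linear model, including the mandatory priors on its non-linear parameters (data, variable names, and the specific prior choices are illustrative assumptions only): \preformatted{
nlform <- bf(y ~ alpha - beta * lambda^x,
             alpha ~ 1, beta ~ 1, lambda ~ 1, nl = TRUE)
nlprior <- c(set_prior("normal(0, 5)", nlpar = "alpha"),
             set_prior("normal(0, 5)", nlpar = "beta"),
             set_prior("normal(0.5, 0.5)", nlpar = "lambda"))
# fit <- brm(nlform, data = mydata, prior = nlprior)
}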
For some examples of non-linear models, see \code{vignette("brms_nonlinear")}. \bold{Formula syntax for predicting distributional parameters} It is also possible to predict parameters of the response distribution such as the residual standard deviation \code{sigma} in gaussian models or the hurdle probability \code{hu} in hurdle models. The syntax closely resembles that of a non-linear parameter, for instance \code{sigma ~ x + s(z) + (1+x|g)}. For some examples of distributional models, see \code{vignette("brms_distreg")}. Parameter \code{mu} exists for every family and can be used as an alternative to specifying terms in \code{formula}. If both \code{mu} and \code{formula} are given, the right-hand side of \code{formula} is ignored. Accordingly, specifying terms on the right-hand side of both \code{formula} and \code{mu} at the same time is deprecated. In future versions, \code{formula} might be updated by \code{mu}. The following are distributional parameters of specific families (all other parameters are treated as non-linear parameters): \code{sigma} (residual standard deviation or scale of the \code{gaussian}, \code{student}, \code{skew_normal}, \code{lognormal}, \code{exgaussian}, and \code{asym_laplace} families); \code{shape} (shape parameter of the \code{Gamma}, \code{weibull}, \code{negbinomial}, and related zero-inflated / hurdle families); \code{nu} (degrees of freedom parameter of the \code{student} and \code{frechet} families); \code{phi} (precision parameter of the \code{beta} and \code{zero_inflated_beta} families); \code{kappa} (precision parameter of the \code{von_mises} family); \code{beta} (mean parameter of the exponential component of the \code{exgaussian} family); \code{quantile} (quantile parameter of the \code{asym_laplace} family); \code{zi} (zero-inflation probability); \code{hu} (hurdle probability); \code{zoi} (zero-one-inflation probability); \code{coi} (conditional one-inflation probability); \code{disc} (discrimination) for ordinal models; \code{bs}, \code{ndt}, and \code{bias} (boundary separation, non-decision time, and initial bias of the \code{wiener} diffusion model). By default, distributional parameters are modeled on the log scale if they can only be positive or on the logit scale if they can only be within the unit interval. Alternatively, one may fix distributional parameters to certain values. However, this is mainly useful when models become too complicated and otherwise have convergence issues. We thus suggest being generally careful when making use of this option. The \code{quantile} parameter of the \code{asym_laplace} distribution is a good example where it is useful. By fixing \code{quantile}, one can perform quantile regression for the specified quantile. For instance, \code{quantile = 0.25} allows predicting the 25\%-quantile. Furthermore, the \code{bias} parameter in drift-diffusion models is assumed to be \code{0.5} (i.e., no bias) in many applications. To achieve this, simply write \code{bias = 0.5}. Other possible applications are the Cauchy distribution as a special case of the Student-t distribution with \code{nu = 1}, or the geometric distribution as a special case of the negative binomial distribution with \code{shape = 1}. Furthermore, the parameter \code{disc} ('discrimination') in ordinal models is fixed to \code{1} by default and not estimated, but may be modeled as any other distributional parameter if desired (see examples).
For reasons of identification, \code{'disc'} can only be positive, which is achieved by applying the log-link. In categorical models, distributional parameters do not have fixed names. Instead, they are named after the response categories (excluding the first one, which serves as the reference category), with the prefix \code{'mu'}. If, for instance, categories are named \code{cat1}, \code{cat2}, and \code{cat3}, the distributional parameters will be named \code{mucat2} and \code{mucat3}. Some distributional parameters currently supported by \code{brmsformula} have to be positive (a negative standard deviation or precision parameter does not make any sense) or are bounded between 0 and 1 (for zero-inflated / hurdle probabilities, quantiles, or the initial bias parameter of drift-diffusion models). However, linear predictors can be positive or negative, and thus the log link (for positive parameters) or logit link (for probability parameters) are used by default to ensure that distributional parameters are within their valid intervals. This implies that, by default, effects for such distributional parameters are estimated on the log / logit scale and one has to apply the inverse link function to get to the effects on the original scale. Alternatively, it is possible to use the identity link to predict parameters on their original scale directly. However, this is much more likely to lead to problems in the model fitting if the parameter actually has a restricted range. See also \code{\link{brmsfamily}} for an overview of valid link functions. \bold{Formula syntax for mixture models} The specification of mixture models closely resembles that of non-mixture models. If not specified otherwise (see below), all mean parameters of the mixture components are predicted using the right-hand side of \code{formula}. All types of predictor terms allowed in non-mixture models are allowed in mixture models as well. Distributional parameters of mixture distributions have the same name as those of the corresponding ordinary distributions, but with a number at the end to indicate the mixture component. For instance, if you use family \code{mixture(gaussian, gaussian)}, the distributional parameters are \code{sigma1} and \code{sigma2}. Distributional parameters of the same class can be fixed to the same value. For the above example, we could write \code{sigma2 = "sigma1"} to make sure that both components have the same residual standard deviation, which is in turn estimated from the data. In addition, there are two types of special distributional parameters. The first are named \code{mu} and allow for modeling different predictors for the mean parameters of different mixture components. For instance, if you want to predict the mean of the first component using predictor \code{x} and the mean of the second component using predictor \code{z}, you can write \code{mu1 ~ x} as well as \code{mu2 ~ z}. The second are named \code{theta}, which constitute the mixing proportions. If the mixing proportions are fixed to certain values, they are internally normalized to form a probability vector. If one seeks to predict the mixing proportions, all but one of them has to be predicted, while the remaining one is used as the reference category to identify the model. The \code{softmax} function is applied on the linear predictor terms to form a probability vector. For more information on mixture models, see the documentation of \code{\link{mixture}}.
\bold{Formula syntax for multivariate models} Multivariate models may be specified using \code{mvbind} notation or with help of the \code{\link{mvbf}} function. Suppose that \code{y1} and \code{y2} are response variables and \code{x} is a predictor. Then \code{mvbind(y1, y2) ~ x} specifies a multivariate model. The effects of all terms specified at the RHS of the formula are assumed to vary across response variables. For instance, two parameters will be estimated for \code{x}, one for the effect on \code{y1} and another for the effect on \code{y2}. This is also true for group-level effects. When writing, for instance, \code{mvbind(y1, y2) ~ x + (1+x|g)}, group-level effects will be estimated separately for each response. To model these effects as correlated across responses, use the ID syntax (see above). For the present example, this would look as follows: \code{mvbind(y1, y2) ~ x + (1+x|2|g)}. Of course, you could also use any value other than \code{2} as ID. It is also possible to specify different formulas for different responses. If, for instance, \code{y1} should be predicted by \code{x} and \code{y2} should be predicted by \code{z}, we could write \code{mvbf(y1 ~ x, y2 ~ z)}. Alternatively, multiple \code{brmsformula} objects can be added to specify a joint multivariate model (see 'Examples'). } \examples{ # multilevel model with smoothing terms brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2)) # additionally predict 'sigma' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2), sigma ~ x1 + (1|g2)) # use the shorter alias 'bf' (formula1 <- brmsformula(y ~ x + (x|g))) (formula2 <- bf(y ~ x + (x|g))) # will be TRUE identical(formula1, formula2) # incorporate censoring bf(y | cens(censor_variable) ~ predictors) # define a simple non-linear model bf(y ~ a1 - a2^x, a1 + a2 ~ 1, nl = TRUE) # predict a1 and a2 differently bf(y ~ a1 - a2^x, a1 ~ 1, a2 ~ x + (x|g), nl = TRUE) # correlated group-level effects across parameters bf(y ~ a1 - a2^x, a1 ~ 1 + (1 |2| g), a2 ~ x + (x |2| g), nl = TRUE) # alternative but equivalent way to specify the above model bf(y ~ a1 - a2^x, a1 ~ 1 + (1 | gr(g, id = 2)), a2 ~ x + (x | gr(g, id = 2)), nl = TRUE) # define a multivariate model bf(mvbind(y1, y2) ~ x * z + (1|g)) # define a zero-inflated model # also predicting the zero-inflation part bf(y ~ x * z + (1+x|ID1|g), zi ~ x + (1|ID1|g)) # specify a predictor as monotonic bf(y ~ mo(x) + more_predictors) # for ordinal models only # specify a predictor as category specific bf(y ~ cs(x) + more_predictors) # add a category specific group-level intercept bf(y ~ cs(x) + (cs(1)|g)) # specify parameter 'disc' bf(y ~ person + item, disc ~ item) # specify variables containing measurement error bf(y ~ me(x, sdx)) # specify predictors on all parameters of the wiener diffusion model # the main formula models the drift rate 'delta' bf(rt | dec(decision) ~ x, bs ~ x, ndt ~ x, bias ~ x) # fix the bias parameter to 0.5 bf(rt | dec(decision) ~ x, bias = 0.5) # specify different predictors for different mixture components mix <- mixture(gaussian, gaussian) bf(y ~ 1, mu1 ~ x, mu2 ~ z, family = mix) # fix both residual standard deviations to the same value bf(y ~ x, sigma2 = "sigma1", family = mix) # use the '+' operator to specify models bf(y ~ 1) + nlf(sigma ~ a * exp(b * x), a ~ x) + lf(b ~ z + (1|g), dpar = "sigma") + gaussian() # specify a multivariate model using the '+' operator bf(y1 ~ x + (1|g)) + gaussian() + cor_ar(~1|g) + bf(y2 ~ z) + poisson() # specify correlated residuals of a gaussian and a poisson model form1 <- 
bf(y1 ~ 1 + x + (1|c|obs), sigma = 1) + gaussian() form2 <- bf(y2 ~ 1 + x + (1|c|obs)) + poisson() # model missing values in predictors bf(bmi ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) # model sigma as a function of the mean bf(y ~ eta, nl = TRUE) + lf(eta ~ 1 + x) + nlf(sigma ~ tau * sqrt(eta)) + lf(tau ~ 1) } \seealso{ \code{\link{mvbrmsformula}}, \code{\link{brmsformula-helpers}} } brms/man/bridge_sampler.brmsfit.Rd0000644000175000017500000000453214111751667017052 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{bridge_sampler.brmsfit} \alias{bridge_sampler.brmsfit} \alias{bridge_sampler} \title{Log Marginal Likelihood via Bridge Sampling} \usage{ \method{bridge_sampler}{brmsfit}(samples, ...) } \arguments{ \item{samples}{A \code{brmsfit} object.} \item{...}{Additional arguments passed to \code{\link[bridgesampling:bridge_sampler]{bridge_sampler.stanfit}}.} } \description{ Computes the log marginal likelihood via bridge sampling, which can be used in the computation of Bayes factors and posterior model probabilities. The \code{brmsfit} method is just a thin wrapper around the corresponding method for \code{stanfit} objects. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{bridge_sampler} cannot be computed. Thus, please set \code{save_pars = save_pars(all = TRUE)} in the call to \code{brm}, if you are planning to apply \code{bridge_sampler} to your models. The computation of marginal likelihoods based on bridge sampling requires a lot more posterior draws than usual. A good conservative rule of thumb is perhaps 10-fold more draws (read: the default of 4000 draws may not be enough in many cases). If not enough posterior draws are provided, the bridge sampling algorithm tends to be unstable, leading to considerably different results each time it is run. We thus recommend running \code{bridge_sampler} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:bridge_sampler]{bridgesampling::bridge_sampler}}. } \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_pars = save_pars(all = TRUE) ) summary(fit1) bridge_sampler(fit1) # model without the treatment effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_pars = save_pars(all = TRUE) ) summary(fit2) bridge_sampler(fit2) } } \seealso{ \code{ \link[brms:bayes_factor.brmsfit]{bayes_factor}, \link[brms:post_prob.brmsfit]{post_prob} } } brms/man/ngrps.brmsfit.Rd0000644000175000017500000000100313701270370015204 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{ngrps.brmsfit} \alias{ngrps.brmsfit} \alias{ngrps} \title{Number of Grouping Factor Levels} \usage{ \method{ngrps}{brmsfit}(object, ...) ngrps(object, ...) } \arguments{ \item{object}{An \R object.} \item{...}{Currently ignored.} } \value{ A named list containing the number of levels per grouping factor. } \description{ Extract the number of levels of one or more grouping factors.
} brms/man/print.brmsfit.Rd0000644000175000017500000000136713701270367015227 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary.R \name{print.brmsfit} \alias{print.brmsfit} \alias{print.brmssummary} \title{Print a summary for a fitted model represented by a \code{brmsfit} object} \usage{ \method{print}{brmsfit}(x, digits = 2, ...) } \arguments{ \item{x}{An object of class \code{brmsfit}} \item{digits}{The number of significant digits for printing out the summary; defaults to 2. The effective sample size is always rounded to integers.} \item{...}{Additional arguments that would be passed to method \code{summary} of \code{brmsfit}.} } \description{ Print a summary for a fitted model represented by a \code{brmsfit} object } \seealso{ \code{\link{summary.brmsfit}} } brms/man/brms-package.Rd0000644000175000017500000000762714105230573014761 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brms-package.R \docType{package} \name{brms-package} \alias{brms-package} \alias{brms} \title{Bayesian Regression Models using 'Stan'} \description{ \if{html}{ \figure{stanlogo.png}{options: width="50px" alt="https://mc-stan.org/about/logo/"} \emph{Stan Development Team} } The \pkg{brms} package provides an interface to fit Bayesian generalized multivariate (non-)linear multilevel models using \pkg{Stan}, which is a C++ package for obtaining full Bayesian inference (see \url{https://mc-stan.org/}). The formula syntax is an extended version of the syntax applied in the \pkg{lme4} package to provide a familiar and simple interface for performing regression analyses. } \details{ The main function of \pkg{brms} is \code{\link{brm}}, which uses formula syntax to specify a wide range of complex Bayesian models (see \code{\link{brmsformula}} for details). Based on the supplied formulas, data, and additional information, it writes the Stan code on the fly via \code{\link{make_stancode}}, prepares the data via \code{\link{make_standata}}, and fits the model using \pkg{\link[rstan:rstan]{Stan}}. Subsequently, a large number of post-processing methods can be applied: To get an overview on the estimated parameters, \code{\link[brms:summary.brmsfit]{summary}} or \code{\link[brms:conditional_effects.brmsfit]{conditional_effects}} are perfectly suited. Detailed visual analyses can be performed by applying the \code{\link{pp_check}} and \code{\link{stanplot}} methods, which both rely on the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. Model comparisons can be done via \code{\link{loo}} and \code{\link{waic}}, which make use of the \pkg{\link[loo:loo-package]{loo}} package as well as via \code{\link{bayes_factor}} which relies on the \pkg{bridgesampling} package. For a full list of methods to apply, type \code{methods(class = "brmsfit")}. Because \pkg{brms} is based on \pkg{Stan}, a C++ compiler is required. The program Rtools (available on \url{https://cran.r-project.org/bin/windows/Rtools/}) comes with a C++ compiler for Windows. On Mac, you should use Xcode. For further instructions on how to get the compilers running, see the prerequisites section at the \href{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}{RStan-Getting-Started} page. 
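To make the workflow sketched above concrete, a minimal (purely illustrative) analysis of the \code{inhaler} data shipped with \pkg{brms} could look as follows; note that fitting requires model compilation, as discussed below: \preformatted{
library(brms)
# fit a simple multilevel model (requires compilation and sampling)
fit <- brm(rating ~ treat + period + carry + (1 | subject),
           data = inhaler)
summary(fit)
pp_check(fit)
# inspect the Stan code generated under the hood
make_stancode(rating ~ treat + period + carry + (1 | subject),
              data = inhaler)
}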
When comparing other packages fitting multilevel models to \pkg{brms}, keep in mind that the latter needs to compile models before actually fitting them, which will require between 20 and 40 seconds depending on your machine, operating system, and overall model complexity. Thus, fitting smaller models may be relatively slow as compilation time makes up the majority of the whole running time. For larger / more complex models, however, fitting may take several minutes or even hours, so that the compilation time won't make much of a difference for these models. See \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")} for a general introduction and overview of \pkg{brms}. For a full list of available vignettes, type \code{vignette(package = "brms")}. } \references{ Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. \code{doi:10.18637/jss.v080.i01} Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling with the R Package brms. \emph{The R Journal}, 10(1), 395–411. \code{doi:10.32614/RJ-2018-017} The Stan Development Team. \emph{Stan Modeling Language User's Guide and Reference Manual}. \url{https://mc-stan.org/users/documentation/}. Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. \url{https://mc-stan.org/} } \seealso{ \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}}, \code{\link{brmsfit}} } brms/man/ma.Rd0000644000175000017500000000334513701270367013021 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{ma} \alias{ma} \title{Set up MA(q) correlation structures} \usage{ ma(time = NA, gr = NA, q = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a moving average (MA) term of order q in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with MA terms.
} \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ ma(q = 2), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ar}} } brms/man/gr.Rd0000644000175000017500000000425713701270367013037 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-re.R \name{gr} \alias{gr} \title{Set up basic grouping terms in \pkg{brms}} \usage{ gr(..., by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian") } \arguments{ \item{...}{One or more terms containing grouping factors.} \item{by}{An optional factor variable, specifying sub-populations of the groups. For each level of the \code{by} variable, a separate variance-covariance matrix will be fitted. Levels of the grouping factor must be nested in levels of the \code{by} variable.} \item{cor}{Logical. If \code{TRUE} (the default), group-level terms will be modelled as correlated.} \item{id}{Optional character string. All group-level terms across the model with the same \code{id} will be modeled as correlated (if \code{cor} is \code{TRUE}). See \code{\link{brmsformula}} for more details.} \item{cov}{An optional matrix which is proportional to the within-group covariance matrix of the group-level effects. All levels of the grouping factor should appear as rownames of the corresponding matrix. This argument can be used, among others, to model pedigrees and phylogenetic effects. See \code{vignette("brms_phylogenetics")} for more details. By default, levels of the same grouping factor are modeled as independent of each other.} \item{dist}{Name of the distribution of the group-level effects. Currently \code{"gaussian"} is the only option.} } \description{ Function used to set up a basic grouping term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with grouping terms. \code{gr} is called implicitly inside the package and there is usually no need to call it directly. } \examples{ \dontrun{ # model using basic lme4-style formula fit1 <- brm(count ~ Trt + (1|patient), data = epilepsy) summary(fit1) # equivalent model using 'gr' which is called internally anyway fit2 <- brm(count ~ Trt + (1|gr(patient)), data = epilepsy) summary(fit2) # include Trt as a by variable fit3 <- brm(count ~ Trt + (1|gr(patient, by = Trt)), data = epilepsy) summary(fit3) } } \seealso{ \code{\link{brmsformula}} } brms/man/cor_ma.Rd0000644000175000017500000000345613701270367013667 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_ma} \alias{cor_ma} \title{(Deprecated) MA(q) correlation structure} \usage{ cor_ma(formula = ~1, q = 1, cov = FALSE) } \arguments{ \item{formula}{A one-sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure.
Default is 1.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma} containing solely moving average terms. } \description{ This function is deprecated. Please see \code{\link{ma}} for the new syntax. This function is a constructor for the \code{cor_arma} class, allowing for moving average terms only. } \examples{ cor_ma(~visit|patient, q = 2) } \seealso{ \code{\link{cor_arma}} } brms/man/mcmc_plot.brmsfit.Rd0000644000175000017500000000566714111751667016062 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{mcmc_plot.brmsfit} \alias{mcmc_plot.brmsfit} \alias{stanplot} \alias{stanplot.brmsfit} \alias{mcmc_plot} \title{MCMC Plots Implemented in \pkg{bayesplot}} \usage{ \method{mcmc_plot}{brmsfit}( object, pars = NA, type = "intervals", variable = NULL, regex = FALSE, fixed = FALSE, ... ) mcmc_plot(object, ...) } \arguments{ \item{object}{An \R object typically of class \code{brmsfit}} \item{pars}{Deprecated alias of \code{variable}. Names of the parameters to plot, as given by a character vector or a regular expression.} \item{type}{The type of the plot. Supported types are (as names) \code{hist}, \code{dens}, \code{hist_by_chain}, \code{dens_overlay}, \code{violin}, \code{intervals}, \code{areas}, \code{acf}, \code{acf_bar},\code{trace}, \code{trace_highlight}, \code{scatter}, \code{rhat}, \code{rhat_hist}, \code{neff}, \code{neff_hist} \code{nuts_acceptance}, \code{nuts_divergence}, \code{nuts_stepsize}, \code{nuts_treedepth}, and \code{nuts_energy}. For an overview on the various plot types see \code{\link[bayesplot:MCMC-overview]{MCMC-overview}}.} \item{variable}{Names of the variables (parameters) to plot, as given by a character vector or a regular expression (if \code{regex = TRUE}). By default, a hopefully not too large selection of variables is plotted.} \item{regex}{Logical; Indicates whether \code{variable} should be treated as regular expressions. Defaults to \code{FALSE}.} \item{fixed}{(Deprecated) Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE} and only works with argument \code{pars}.} \item{...}{Additional arguments passed to the plotting functions. See \code{\link[bayesplot:MCMC-overview]{MCMC-overview}} for more details.} } \value{ A \code{\link[ggplot2:ggplot]{ggplot}} object that can be further customized using the \pkg{ggplot2} package. } \description{ Convenient way to call MCMC plotting functions implemented in the \pkg{bayesplot} package. } \details{ Also consider using the \pkg{shinystan} package available via method \code{\link{launch_shinystan}} in \pkg{brms} for flexible and interactive visual analysis. 
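For instance, assuming a fitted model \code{model} as in the examples below, the interactive \pkg{shinystan} interface can be opened with the following one-liner (shown here purely as a usage sketch):

launch_shinystan(model)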
} \examples{ \dontrun{ model <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = "poisson") # plot posterior intervals mcmc_plot(model) # only show population-level effects in the plots mcmc_plot(model, variable = "^b_", regex = TRUE) # show histograms of the posterior distributions mcmc_plot(model, type = "hist") # plot some diagnostics of the sampler mcmc_plot(model, type = "neff") mcmc_plot(model, type = "rhat") # plot some diagnostics specific to the NUTS sampler mcmc_plot(model, type = "nuts_acceptance") mcmc_plot(model, type = "nuts_divergence") } } brms/man/cor_sar.Rd0000644000175000017500000000345513701270367014056 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_sar} \alias{cor_sar} \alias{cor_lagsar} \alias{cor_errorsar} \title{(Deprecated) Spatial simultaneous autoregressive (SAR) structures} \usage{ cor_sar(W, type = c("lag", "error")) cor_lagsar(W) cor_errorsar(W) } \arguments{ \item{W}{An object specifying the spatial weighting matrix. Can be either the spatial weight matrix itself or an object of class \code{listw} or \code{nb}, from which the spatial weighting matrix can be computed.} \item{type}{Type of the SAR structure. Either \code{"lag"} (for SAR of the response values) or \code{"error"} (for SAR of the residuals).} } \value{ An object of class \code{cor_sar} to be used in calls to \code{\link{brm}}. } \description{ These functions are deprecated. Please see \code{\link{sar}} for the new syntax. These functions are constructors for the \code{cor_sar} class implementing spatial simultaneous autoregressive structures. The \code{lagsar} structure implements SAR of the response values: \deqn{y = \rho W y + \eta + e} The \code{errorsar} structure implements SAR of the residuals: \deqn{y = \eta + u, u = \rho W u + e} In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are independent normally or t-distributed residuals. } \details{ Currently, only families \code{gaussian} and \code{student} support SAR structures. } \examples{ \dontrun{ data(oldcol, package = "spdep") fit1 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, autocor = cor_lagsar(COL.nb), chains = 2, cores = 2) summary(fit1) plot(fit1) fit2 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, autocor = cor_errorsar(COL.nb), chains = 2, cores = 2) summary(fit2) plot(fit2) } } brms/man/inhaler.Rd0000644000175000017500000000332614105230573014037 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{inhaler} \alias{inhaler} \title{Clarity of inhaler instructions} \format{ A data frame of 572 observations containing information on the following 5 variables. \describe{ \item{subject}{The subject number} \item{rating}{The rating of the inhaler instructions on a scale ranging from 1 to 4} \item{treat}{A contrast to indicate which of the two inhaler devices was used} \item{period}{A contrast to indicate the time of administration} \item{carry}{A contrast to indicate possible carry-over effects} } } \source{ Ezzet, F., & Whitehead, J. (1991). A random effects model for ordinal responses from a crossover trial. \emph{Statistics in Medicine}, 10(6), 901-907. } \usage{ inhaler } \description{ Ezzet and Whitehead (1991) analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients.
Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a 4-point ordinal scale. } \examples{ \dontrun{ ## ordinal regression with family "sratio" fit1 <- brm(rating ~ treat + period + carry, data = inhaler, family = sratio(), prior = set_prior("normal(0,5)")) summary(fit1) plot(fit1) ## ordinal regression with family "cumulative" ## and random intercept over subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative(), prior = set_prior("normal(0,5)")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/get_dpar.Rd0000644000175000017500000000345214111751667014213 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{get_dpar} \alias{get_dpar} \title{Draws of a Distributional Parameter} \usage{ get_dpar(prep, dpar, i = NULL, ilink = NULL) } \arguments{ \item{prep}{A 'brmsprep' or 'mvbrmsprep' object created by \code{\link[brms:prepare_predictions.brmsfit]{prepare_predictions}}.} \item{dpar}{Name of the distributional parameter.} \item{i}{The observation numbers for which predictions shall be extracted. If \code{NULL} (the default), all observations will be extracted. Ignored if \code{dpar} is not predicted.} \item{ilink}{Should the inverse link function be applied? If \code{NULL} (the default), the value is chosen internally. In particular, \code{ilink} is \code{TRUE} by default for custom families.} } \value{ If the parameter is predicted and \code{i} is \code{NULL} or \code{length(i) > 1}, an \code{S x N} matrix. If the parameter is not predicted or \code{length(i) == 1}, a vector of length \code{S}. Here \code{S} is the number of draws and \code{N} is the number of observations or the length of \code{i} if specified. } \description{ Get draws of a distributional parameter from a \code{brmsprep} or \code{mvbrmsprep} object. This function is primarily useful when developing custom families or packages depending on \pkg{brms}. It lets callers easily handle both the case in which the distributional parameter is predicted (directly or via a (non-)linear predictor) and the case in which it is fixed to a constant. See the vignette \code{vignette("brms_customfamilies")} for an example use case. } \examples{ \dontrun{ posterior_predict_my_dist <- function(i, prep, ...) { mu <- brms::get_dpar(prep, "mu", i = i) mypar <- brms::get_dpar(prep, "mypar", i = i) my_rng(mu, mypar) } } } brms/man/coef.brmsfit.Rd0000644000175000017500000000355013701270367015003 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{coef.brmsfit} \alias{coef.brmsfit} \title{Extract Model Coefficients} \usage{ \method{coef}{brmsfit}(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link{fixef.brmsfit}} and \code{\link{ranef.brmsfit}}.} } \value{ A list of 3D arrays (one per grouping factor).
If \code{summary} is \code{TRUE}, the 1st dimension contains the factor levels, the 2nd dimension contains the summary statistics (see \code{\link{posterior_summary}}), and the 3rd dimension contains the group-level effects. If \code{summary} is \code{FALSE}, the 1st dimension contains the posterior draws, the 2nd dimension contains the factor levels, and the 3rd dimension contains the group-level effects. } \description{ Extract model coefficients, which are the sum of population-level effects and corresponding group-level effects } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) ## extract population and group-level coefficients separately fixef(fit) ranef(fit) ## extract combined coefficients coef(fit) } } brms/man/update.brmsfit_multiple.Rd0000644000175000017500000000221313701270367017257 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update.R \name{update.brmsfit_multiple} \alias{update.brmsfit_multiple} \title{Update \pkg{brms} models based on multiple data sets} \usage{ \method{update}{brmsfit_multiple}(object, formula., newdata = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit_multiple}.} \item{formula.}{Changes to the formula; for details see \code{\link{update.formula}} and \code{\link{brmsformula}}.} \item{newdata}{List of \code{data.frames} to update the model with new data. Currently required even if the original data should be used.} \item{...}{Other arguments passed to \code{\link{update.brmsfit}} and \code{\link{brm_multiple}}.} } \description{ This method allows to update an existing \code{brmsfit_multiple} object. } \examples{ \dontrun{ library(mice) imp <- mice(nhanes2) # initially fit the model fit_imp1 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) summary(fit_imp1) # update the model using fewer predictors fit_imp2 <- update(fit_imp1, formula. = . ~ hyp + chl, newdata = imp) summary(fit_imp2) } } brms/man/cor_arma.Rd0000644000175000017500000000414413701270367014205 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_arma} \alias{cor_arma} \alias{cor_arma-class} \title{(Deprecated) ARMA(p,q) correlation structure} \usage{ cor_arma(formula = ~1, p = 0, q = 0, r = 0, cov = FALSE) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is 0.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is 0.} \item{r}{No longer supported.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. 
If \code{FALSE} (the default) a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{cor_arma}, representing an autoregression-moving-average correlation structure. } \description{ This function is deprecated. Please see \code{\link{arma}} for the new syntax. This functions is a constructor for the \code{cor_arma} class, representing an autoregression-moving average correlation structure of order (p, q). } \examples{ cor_arma(~ visit | patient, p = 2, q = 2) } \seealso{ \code{\link{cor_ar}}, \code{\link{cor_ma}} } brms/man/recompile_model.Rd0000644000175000017500000000153114136566260015561 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{recompile_model} \alias{recompile_model} \title{Recompile Stan models in \code{brmsfit} objects} \usage{ recompile_model(x, recompile = NULL) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{recompile}{Logical, indicating whether the Stan model should be recompiled. If \code{NULL} (the default), \code{recompile_model} tries to figure out internally, if recompilation is necessary. Setting it to \code{FALSE} will cause \code{recompile_model} to always return the \code{brmsfit} object unchanged.} } \value{ A (possibly updated) \code{brmsfit} object. } \description{ Recompile the Stan model inside a \code{brmsfit} object, if necessary. This does not change the model, it simply recreates the executable so that sampling is possible again. } brms/man/logit_scaled.Rd0000644000175000017500000000073213661463272015056 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{logit_scaled} \alias{logit_scaled} \title{Scaled logit-link} \usage{ logit_scaled(x, lb = 0, ub = 1) } \arguments{ \item{x}{A numeric or complex vector.} \item{lb}{Lower bound defaulting to \code{0}.} \item{ub}{Upper bound defaulting to \code{1}.} } \value{ A numeric or complex vector. } \description{ Computes \code{logit((x - lb) / (ub - lb))} } brms/man/vcov.brmsfit.Rd0000644000175000017500000000214614111751667015047 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{vcov.brmsfit} \alias{vcov.brmsfit} \title{Covariance and Correlation Matrix of Population-Level Effects} \usage{ \method{vcov}{brmsfit}(object, correlation = FALSE, pars = NULL, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{correlation}{Logical; if \code{FALSE} (the default), compute the covariance matrix, if \code{TRUE}, compute the correlation matrix.} \item{pars}{Optional names of coefficients to extract. By default, all coefficients are extracted.} \item{...}{Currently ignored.} } \value{ covariance or correlation matrix of population-level parameters } \description{ Get a point estimate of the covariance or correlation matrix of population-level parameters } \details{ Estimates are obtained by calculating the maximum likelihood covariances (correlations) of the posterior draws. 
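A minimal sketch of this computation, assuming a fitted model \code{fit} (the draws are extracted manually here purely for illustration; \code{vcov} performs the equivalent steps internally):

# posterior draws as a matrix with one column per parameter
draws <- as.matrix(fit)
# keep only the population-level coefficients
b <- draws[, grep("^b_", colnames(draws)), drop = FALSE]
cov(b)  # compare with vcov(fit)
cor(b)  # compare with vcov(fit, correlation = TRUE)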
} \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) vcov(fit) } } brms/man/rows2labels.Rd0000644000175000017500000000161013701270367014654 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conditional_effects.R \name{rows2labels} \alias{rows2labels} \title{Convert Rows to Labels} \usage{ rows2labels(x, digits = 2, sep = " & ", incl_vars = TRUE, ...) } \arguments{ \item{x}{A \code{data.frame} for which to extract labels.} \item{digits}{Minimal number of decimal places shown in the labels of numeric variables.} \item{sep}{A single character string defining the separator between variables used in the labels.} \item{incl_vars}{Indicates if variable names should be part of the labels. Defaults to \code{TRUE}.} \item{...}{Currently unused.} } \value{ A character vector of the same length as the number of rows of \code{x}. } \description{ Convert information in rows to labels for each row. } \seealso{ \code{\link{make_conditions}}, \code{\link{conditional_effects}} } brms/man/prepare_predictions.Rd0000644000175000017500000001206614111751667016470 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prepare_predictions.R \name{prepare_predictions.brmsfit} \alias{prepare_predictions.brmsfit} \alias{prepare_predictions} \alias{extract_draws} \title{Prepare Predictions} \usage{ \method{prepare_predictions}{brmsfit}( x, newdata = NULL, re_formula = NULL, allow_new_levels = FALSE, sample_new_levels = "uncertainty", incl_autocor = TRUE, oos = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, nug = NULL, smooths_only = FALSE, offset = TRUE, newdata2 = NULL, new_objects = NULL, point_estimate = NULL, ... ) prepare_predictions(x, ...) } \arguments{ \item{x}{An \R object typically of class \code{'brmsfit'}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{allow_new_levels}{A flag indicating if new levels of group-level effects are allowed (defaults to \code{FALSE}). Only relevant if \code{newdata} is provided.} \item{sample_new_levels}{Indicates how to sample new levels for grouping factors specified in \code{re_formula}. This argument is only relevant if \code{newdata} is provided and \code{allow_new_levels} is set to \code{TRUE}. If \code{"uncertainty"} (default), each posterior sample for a new level is drawn from the posterior draws of a randomly chosen existing level. Each posterior sample for a new level may be drawn from a different existing level such that the resulting set of new posterior draws represents the variation across existing levels. If \code{"gaussian"}, sample new levels from the (multivariate) normal distribution implied by the group-level standard deviations and correlations. This option may be useful for conducting Bayesian power analysis or predicting new levels in situations where relatively few levels were observed in the original data.
If \code{"old_levels"}, directly sample new levels from the existing levels, where a new level is assigned all of the posterior draws of the same (randomly chosen) existing level.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{oos}{Optional indices of observations for which to compute out-of-sample rather than in-sample predictions. Only required in models that make use of response values to make predictions, that is, currently only ARMA models.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{subset}{Deprecated alias of \code{draw_ids}.} \item{nug}{Small positive number for Gaussian process terms only. For numerical reasons, the covariance matrix of a Gaussian process might not be positive definite. Adding a very small number to the matrix's diagonal often solves this problem. If \code{NULL} (the default), \code{nug} is chosen internally.} \item{smooths_only}{Logical; If \code{TRUE}, only predictions related to the smooth terms will be prepared.} \item{offset}{Logical; Indicates if offsets should be included in the predictions. Defaults to \code{TRUE}.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{new_objects}{Deprecated alias of \code{newdata2}.} \item{point_estimate}{Shall the returned object contain only point estimates of the parameters instead of their posterior draws? Defaults to \code{NULL} in which case no point estimate is computed. Alternatively, may be set to \code{"mean"} or \code{"median"}. This argument is primarily implemented to ensure compatibility with the \code{\link{loo_subsample}} method.} \item{...}{Further arguments passed to \code{\link{validate_newdata}}.} } \value{ An object of class \code{'brmsprep'} or \code{'mvbrmsprep'}, depending on whether a univariate or multivariate model is passed. } \description{ This method helps in preparing \pkg{brms} models for certain post-processing tasks, most notably various forms of predictions. Unless you are a package developer, you will rarely need to call \code{prepare_predictions} directly. } brms/man/arma.Rd0000644000175000017500000000362213701270367013342 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{arma} \alias{arma} \title{Set up ARMA(p,q) correlation structures} \usage{ arma(time = NA, gr = NA, p = 1, q = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure.
Default is \code{1}.} \item{q}{A non-negative integer specifying the moving average (MA) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an autoregressive moving average (ARMA) term of order (p, q) in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with ARMA terms. } \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ arma(p = 2, q = 1), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{ar}}, \code{\link{ma}}, } brms/man/epilepsy.Rd0000644000175000017500000000403014105230573014240 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{epilepsy} \alias{epilepsy} \title{Epileptic seizure counts} \format{ A data frame of 236 observations containing information on the following 9 variables. \describe{ \item{Age}{The age of the patients in years} \item{Base}{The seizure count at 8-weeks baseline} \item{Trt}{Either \code{0} or \code{1} indicating if the patient received anti-convulsant therapy} \item{patient}{The patient number} \item{visit}{The session number from \code{1} (first visit) to \code{4} (last visit)} \item{count}{The seizure count between two visits} \item{obs}{The observation number, that is a unique identifier for each observation} \item{zAge}{Standardized \code{Age}} \item{zBase}{Standardized \code{Base}} } } \source{ Thall, P. F., & Vail, S. C. (1990). Some covariance models for longitudinal count data with overdispersion. \emph{Biometrics, 46(2)}, 657-671. \cr Breslow, N. E., & Clayton, D. G. (1993). Approximate inference in generalized linear mixed models. \emph{Journal of the American Statistical Association}, 88(421), 9-25. } \usage{ epilepsy } \description{ Breslow and Clayton (1993) analyze data initially provided by Thall and Vail (1990) concerning seizure counts in a randomized trial of anti-convulsant therapy in epilepsy. Covariates are treatment, 8-week baseline seizure counts, and age of the patients in years. } \examples{ \dontrun{ ## poisson regression without random effects. fit1 <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson()) summary(fit1) plot(fit1) ## poisson regression with varying intercepts of patients ## as well as normal priors for overall effects parameters. 
fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), prior = set_prior("normal(0,5)")) summary(fit2) plot(fit2) } } \keyword{datasets} brms/man/nsamples.brmsfit.Rd0000644000175000017500000000137614111751667015720 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{nsamples.brmsfit} \alias{nsamples.brmsfit} \alias{nsamples} \title{(Deprecated) Number of Posterior Samples} \usage{ \method{nsamples}{brmsfit}(object, subset = NULL, incl_warmup = FALSE, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{subset}{An optional integer vector defining a subset of samples to be considered.} \item{incl_warmup}{A flag indicating whether to also count warmup / burn-in samples.} \item{...}{Currently ignored.} } \description{ Extract the number of posterior samples (draws) stored in a fitted Bayesian model. Method \code{nsamples} is deprecated. Please use \code{ndraws} instead. } brms/man/posterior_linpred.brmsfit.Rd0000644000175000017500000000636014111751667017637 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_epred.R \name{posterior_linpred.brmsfit} \alias{posterior_linpred.brmsfit} \alias{posterior_linpred} \title{Posterior Draws of the Linear Predictor} \usage{ \method{posterior_linpred}{brmsfit}( object, transform = FALSE, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, incl_thres = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{transform}{Logical; if \code{FALSE} (the default), draws of the linear predictor are returned. If \code{TRUE}, draws of transformed linear predictor, that is, after applying the link function are returned.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{re.form}{Alias of \code{re_formula}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{dpar}{Name of a predicted distributional parameter for which draws are to be returned. By default, draws of the main distributional parameter(s) \code{"mu"} are returned.} \item{nlpar}{Optional name of a predicted non-linear parameter. If specified, expected predictions of this parameters are returned.} \item{incl_thres}{Logical; only relevant for ordinal models when \code{transform} is \code{FALSE}, and ignored otherwise. Shall the thresholds and category-specific effects be included in the linear predictor? For backwards compatibility, the default is to not include them.} \item{ndraws}{Positive integer indicating how many posterior draws should be used. If \code{NULL} (the default) all draws are used. Ignored if \code{draw_ids} is not \code{NULL}.} \item{draw_ids}{An integer vector specifying the posterior draws to be used. If \code{NULL} (the default), all draws are used.} \item{sort}{Logical. Only relevant for time series models. 
Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{...}{Further arguments passed to \code{\link{prepare_predictions}} that control several aspects of data validation and prediction.} } \description{ Compute posterior draws of the linear predictor, that is draws before applying any link functions or other transformations. Can be performed for the data used to fit the model (posterior predictive checks) or for new data. } \examples{ \dontrun{ ## fit a model fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) ## extract linear predictor values pl <- posterior_linpred(fit) str(pl) } } \seealso{ \code{\link{posterior_epred.brmsfit}} } brms/man/brmsfit-class.Rd0000644000175000017500000000514514111751667015200 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \docType{class} \name{brmsfit-class} \alias{brmsfit-class} \alias{brmsfit} \title{Class \code{brmsfit} of models fitted with the \pkg{brms} package} \description{ Models fitted with the \code{\link[brms:brms-package]{brms}} package are represented as a \code{brmsfit} object, which contains the posterior draws (samples), model formula, Stan code, relevant data, and other information. } \details{ See \code{methods(class = "brmsfit")} for an overview of available methods. } \section{Slots}{ \describe{ \item{\code{formula}}{A \code{\link{brmsformula}} object.} \item{\code{data}}{A \code{data.frame} containing all variables used in the model.} \item{\code{data2}}{A \code{list} of data objects which cannot be passed via \code{data}.} \item{\code{prior}}{A \code{\link{brmsprior}} object containing information on the priors used in the model.} \item{\code{stanvars}}{A \code{\link{stanvars}} object.} \item{\code{model}}{The model code in \pkg{Stan} language.} \item{\code{ranef}}{A \code{data.frame} containing the group-level structure.} \item{\code{exclude}}{The names of the parameters for which draws are not saved.} \item{\code{algorithm}}{The name of the algorithm used to fit the model.} \item{\code{backend}}{The name of the backend used to fit the model.} \item{\code{threads}}{An object of class `brmsthreads` created by \code{\link{threading}}.} \item{\code{opencl}}{An object of class `brmsopencl` created by \code{\link{opencl}}.} \item{\code{fit}}{An object of class \code{\link[rstan:stanfit-class]{stanfit}} among others containing the posterior draws.} \item{\code{criteria}}{An empty \code{list} for adding model fit criteria after estimation of the model.} \item{\code{file}}{Optional name of a file in which the model object was stored in or loaded from.} \item{\code{version}}{The versions of \pkg{brms} and \pkg{rstan} with which the model was fitted.} \item{\code{family}}{(Deprecated) A \code{\link{brmsfamily}} object.} \item{\code{autocor}}{(Deprecated) An \code{\link{cor_brms}} object containing the autocorrelation structure if specified.} \item{\code{cov_ranef}}{(Deprecated) A \code{list} of customized group-level covariance matrices.} \item{\code{stan_funs}}{(Deprecated) A character string of length one or \code{NULL}.} \item{\code{data.name}}{(Deprecated) The name of \code{data} as specified by the user.} }} \seealso{ \code{\link{brms}}, \code{\link{brm}}, \code{\link{brmsformula}}, \code{\link{brmsfamily}} } brms/man/loo_R2.brmsfit.Rd0000644000175000017500000000367214111751667015233 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit 
documentation in R/loo_predict.R \name{loo_R2.brmsfit} \alias{loo_R2.brmsfit} \alias{loo_R2} \title{Compute a LOO-adjusted R-squared for regression models} \usage{ \method{loo_R2}{brmsfit}( object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{resp}{Optional names of response variables. If specified, predictions are performed only for the specified response variables.} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Further arguments passed to \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}} and \code{\link[brms:log_lik.brmsfit]{log_lik}}, which are used in the computation of the R-squared values.} } \value{ If \code{summary = TRUE}, an M x C matrix is returned (M = number of response variables and c = \code{length(probs) + 2}) containing summary statistics of the LOO-adjusted R-squared values. If \code{summary = FALSE}, the posterior draws of the LOO-adjusted R-squared values are returned in an S x M matrix (S is the number of draws). } \description{ Compute a LOO-adjusted R-squared for regression models } \examples{ \dontrun{ fit <- brm(mpg ~ wt + cyl, data = mtcars) summary(fit) loo_R2(fit) # compute R2 with new data nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) loo_R2(fit, newdata = nd) } } brms/man/ZeroInflated.Rd0000644000175000017500000000355414105230573015006 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{ZeroInflated} \alias{ZeroInflated} \alias{dzero_inflated_poisson} \alias{pzero_inflated_poisson} \alias{dzero_inflated_negbinomial} \alias{pzero_inflated_negbinomial} \alias{dzero_inflated_binomial} \alias{pzero_inflated_binomial} \alias{dzero_inflated_beta} \alias{pzero_inflated_beta} \title{Zero-Inflated Distributions} \usage{ dzero_inflated_poisson(x, lambda, zi, log = FALSE) pzero_inflated_poisson(q, lambda, zi, lower.tail = TRUE, log.p = FALSE) dzero_inflated_negbinomial(x, mu, shape, zi, log = FALSE) pzero_inflated_negbinomial(q, mu, shape, zi, lower.tail = TRUE, log.p = FALSE) dzero_inflated_binomial(x, size, prob, zi, log = FALSE) pzero_inflated_binomial(q, size, prob, zi, lower.tail = TRUE, log.p = FALSE) dzero_inflated_beta(x, shape1, shape2, zi, log = FALSE) pzero_inflated_beta(q, shape1, shape2, zi, lower.tail = TRUE, log.p = FALSE) } \arguments{ \item{x}{Vector of quantiles.} \item{zi}{zero-inflation probability} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{q}{Vector of quantiles.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{mu, lambda}{location parameter} \item{shape, shape1, shape2}{shape parameter} \item{size}{number of trials} \item{prob}{probability of success on each trial} } \description{ Density and distribution functions for zero-inflated distributions. 
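As a quick numerical illustration of the mixture density described in the 'Details' section below (the parameter values are made up for the example):

# density at zero: zi + (1 - zi) * dpois(0, lambda)
dzero_inflated_poisson(0, lambda = 2, zi = 0.3)
0.3 + 0.7 * dpois(0, lambda = 2)
# density at a non-zero count: (1 - zi) * dpois(x, lambda)
dzero_inflated_poisson(3, lambda = 2, zi = 0.3)
0.7 * dpois(3, lambda = 2)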
} \details{ The density of a zero-inflated distribution can be specified as follows. If \eqn{x = 0} set \eqn{f(x) = \theta + (1 - \theta) * g(0)}. Else set \eqn{f(x) = (1 - \theta) * g(x)}, where \eqn{g(x)} is the density of the non-zero-inflated part. } brms/man/cor_arr.Rd0000644000175000017500000000165613701270367014056 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_arr} \alias{cor_arr} \title{(Defunct) ARR correlation structure} \usage{ cor_arr(formula = ~1, r = 1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} \item{r}{No longer supported.} } \description{ The ARR correlation structure is no longer supported. } \keyword{internal} brms/man/get_y.Rd0000644000175000017500000000167414105230573013530 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-response.R \name{get_y} \alias{get_y} \title{Extract response values} \usage{ get_y(x, resp = NULL, sort = FALSE, warn = FALSE, ...) } \arguments{ \item{x}{A \code{\link{brmsfit}} object.} \item{resp}{Optional names of response variables for which to extract values.} \item{sort}{Logical. Only relevant for time series models. Indicating whether to return predicted values in the original order (\code{FALSE}; default) or in the order of the time series (\code{TRUE}).} \item{warn}{For internal use only.} \item{...}{Further arguments passed to \code{\link{standata}}.} } \value{ Returns a vector of response values for univariate models and a matrix of response values with one column per response variable for multivariate models. } \description{ Extract response values from a \code{\link{brmsfit}} object. } \keyword{internal} brms/man/inv_logit_scaled.Rd0000644000175000017500000000077313625767111015736 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/numeric-helpers.R \name{inv_logit_scaled} \alias{inv_logit_scaled} \title{Scaled inverse logit-link} \usage{ inv_logit_scaled(x, lb = 0, ub = 1) } \arguments{ \item{x}{A numeric or complex vector.} \item{lb}{Lower bound defaulting to \code{0}.} \item{ub}{Upper bound defaulting to \code{1}.} } \value{ A numeric or complex vector between \code{lb} and \code{ub}. } \description{ Computes \code{inv_logit(x) * (ub - lb) + lb} } brms/man/ar.Rd0000644000175000017500000000334613701270367013027 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{ar} \alias{ar} \title{Set up AR(p) correlation structures} \usage{ ar(time = NA, gr = NA, p = 1, cov = FALSE) } \arguments{ \item{time}{An optional time variable specifying the time ordering of the observations. By default, the existing order of the observations in the data is used.} \item{gr}{An optional grouping variable. 
If specified, the correlation structure is assumed to apply only to observations within the same grouping level.} \item{p}{A non-negative integer specifying the autoregressive (AR) order of the ARMA structure. Default is \code{1}.} \item{cov}{A flag indicating whether ARMA effects should be estimated by means of residual covariance matrices. This is currently only possible for stationary ARMA effects of order 1. If the model family does not have natural residuals, latent residuals are added automatically. If \code{FALSE} (the default), a regression formulation is used that is considerably faster and allows for ARMA effects of order higher than 1 but is only available for \code{gaussian} models and some of its generalizations.} } \value{ An object of class \code{'arma_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up an autoregressive (AR) term of order p in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with AR terms. } \examples{ \dontrun{ data("LakeHuron") LakeHuron <- as.data.frame(LakeHuron) fit <- brm(x ~ ar(p = 2), data = LakeHuron) summary(fit) } } \seealso{ \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ma}} } brms/man/cor_cosy.Rd0000644000175000017500000000235213701270367014241 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_cosy} \alias{cor_cosy} \alias{cor_cosy-class} \title{(Deprecated) Compound Symmetry (COSY) Correlation Structure} \usage{ cor_cosy(formula = ~1) } \arguments{ \item{formula}{A one sided formula of the form \code{~ t}, or \code{~ t | g}, specifying a time covariate \code{t} and, optionally, a grouping factor \code{g}. A covariate for this correlation structure must be integer valued. When a grouping factor is present in \code{formula}, the correlation structure is assumed to apply only to observations within the same grouping level; observations with different grouping levels are assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to using the order of the observations in the data as a covariate, and no groups.} } \value{ An object of class \code{cor_cosy}, representing a compound symmetry correlation structure. } \description{ This function is deprecated. Please see \code{\link{cosy}} for the new syntax. This functions is a constructor for the \code{cor_cosy} class, representing a compound symmetry structure corresponding to uniform correlation. } \examples{ cor_cosy(~ visit | patient) } brms/man/ExGaussian.Rd0000644000175000017500000000250714111751667014475 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{ExGaussian} \alias{ExGaussian} \alias{dexgaussian} \alias{pexgaussian} \alias{rexgaussian} \title{The Exponentially Modified Gaussian Distribution} \usage{ dexgaussian(x, mu, sigma, beta, log = FALSE) pexgaussian(q, mu, sigma, beta, lower.tail = TRUE, log.p = FALSE) rexgaussian(n, mu, sigma, beta) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of means of the combined distribution.} \item{sigma}{Vector of standard deviations of the gaussian component.} \item{beta}{Vector of scales of the exponential component.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). 
Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the exponentially modified Gaussian distribution with mean \code{mu} and standard deviation \code{sigma} of the gaussian component, as well as scale \code{beta} of the exponential component. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/pp_average.brmsfit.Rd0000644000175000017500000000727314111751667016211 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{pp_average.brmsfit} \alias{pp_average.brmsfit} \alias{pp_average} \title{Posterior predictive draws averaged across models} \usage{ \method{pp_average}{brmsfit}( x, ..., weights = "stacking", method = "posterior_predict", ndraws = NULL, nsamples = NULL, summary = TRUE, probs = c(0.025, 0.975), robust = FALSE, model_names = NULL, control = list(), seed = NULL ) pp_average(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}, For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{method}{Method used to obtain predictions to average over. Should be one of \code{"posterior_predict"} (default), \code{"posterior_epred"}, \code{"posterior_linpred"} or \code{"predictive_error"}.} \item{ndraws}{Total number of posterior draws to use.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{summary}{Should summary statistics (i.e. means, sds, and 95\% intervals) be returned instead of the raw values? Default is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default) the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{control}{Optional \code{list} of further arguments passed to the function specified in \code{weights}.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} } \value{ Same as the output of the method specified in argument \code{method}. } \description{ Compute posterior predictive draws averaged across models. 
Weighting can be done in various ways, for instance using Akaike weights based on information criteria or marginal likelihoods. } \details{ Weights are computed with the \code{\link{model_weights}} method. } \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # compute model-averaged predicted values (df <- unique(inhaler[, c("treat", "period", "carry")])) pp_average(fit1, fit2, newdata = df) # compute model-averaged expected (fitted) values pp_average(fit1, fit2, method = "posterior_epred", newdata = df) } } \seealso{ \code{\link{model_weights}}, \code{\link{posterior_average}} } brms/man/sar.Rd0000644000175000017500000000360513701270367013210 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-ac.R \name{sar} \alias{sar} \title{Spatial simultaneous autoregressive (SAR) structures} \usage{ sar(M, type = "lag") } \arguments{ \item{M}{An object specifying the spatial weighting matrix. Can be either the spatial weight matrix itself or an object of class \code{listw} or \code{nb}, from which the spatial weighting matrix can be computed.} \item{type}{Type of the SAR structure. Either \code{"lag"} (for SAR of the response values) or \code{"error"} (for SAR of the residuals). More information is provided in the 'Details' section.} } \value{ An object of class \code{'sar_term'}, which is a list of arguments to be interpreted by the formula parsing functions of \pkg{brms}. } \description{ Set up a spatial simultaneous autoregressive (SAR) term in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up a model with SAR terms. } \details{ The \code{lagsar} structure implements SAR of the response values: \deqn{y = \rho W y + \eta + e} The \code{errorsar} structure implements SAR of the residuals: \deqn{y = \eta + u, u = \rho W u + e} In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are independent normally or t-distributed residuals. Currently, only families \code{gaussian} and \code{student} support SAR structures. } \examples{ \dontrun{ data(oldcol, package = "spdep") fit1 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "lag"), data = COL.OLD, data2 = list(COL.nb = COL.nb), chains = 2, cores = 2) summary(fit1) plot(fit1) fit2 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "error"), data = COL.OLD, data2 = list(COL.nb = COL.nb), chains = 2, cores = 2) summary(fit2) plot(fit2) } } \seealso{ \code{\link{autocor-terms}} } brms/man/s.Rd0000644000175000017500000000233213701270367012661 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/formula-sm.R \name{s} \alias{s} \alias{t2} \title{Defining smooths in \pkg{brms} formulas} \usage{ s(...) t2(...) } \arguments{ \item{...}{Arguments passed to \code{\link[mgcv:s]{mgcv::s}} or \code{\link[mgcv:t2]{mgcv::t2}}.} } \description{ Functions used in the definition of smooth terms within model formulas. The functions do not evaluate a (spline) smooth - they exist purely to help set up a model using spline-based smooths. } \details{ The functions defined here are just simple wrappers of the respective functions of the \pkg{mgcv} package.
} \examples{ \dontrun{ # simulate some data dat <- mgcv::gamSim(1, n = 200, scale = 2) # fit univariate smooths for all predictors fit1 <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, chains = 2) summary(fit1) plot(conditional_smooths(fit1), ask = FALSE) # fit a more complicated smooth model fit2 <- brm(y ~ t2(x0, x1) + s(x2, by = x3), data = dat, chains = 2) summary(fit2) plot(conditional_smooths(fit2), ask = FALSE) } } \seealso{ \code{\link{brmsformula}}, \code{\link[mgcv:s]{mgcv::s}}, \code{\link[mgcv:t2]{mgcv::t2}} } brms/man/post_prob.brmsfit.Rd0000644000175000017500000000547414010776135016103 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{post_prob.brmsfit} \alias{post_prob.brmsfit} \alias{post_prob} \title{Posterior Model Probabilities from Marginal Likelihoods} \usage{ \method{post_prob}{brmsfit}(x, ..., prior_prob = NULL, model_names = NULL) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{prior_prob}{Numeric vector with prior model probabilities. If omitted, a uniform prior is used (i.e., all models are equally likely a priori). The default \code{NULL} corresponds to equal prior model weights.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} } \description{ Compute posterior model probabilities from marginal likelihoods. The \code{brmsfit} method is just a thin wrapper around the corresponding method for \code{bridge} objects. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{post_prob} cannot be computed. Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, if you are planning to apply \code{post_prob} to your models. The computation of model probabilities based on bridge sampling requires a lot more posterior samples than usual. A good conservative rule of thumb is perhaps 10-fold more samples (read: the default of 4000 samples may not be enough in many cases). If not enough posterior samples are provided, the bridge sampling algorithm tends to be unstable, leading to considerably different results each time it is run. We thus recommend running \code{post_prob} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:post_prob]{bridgesampling::post_prob}}.
} \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit1) # model without the treatment effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit2) # compute the posterior model probabilities post_prob(fit1, fit2) # specify prior model probabilities post_prob(fit1, fit2, prior_prob = c(0.8, 0.2)) } } \seealso{ \code{ \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, \link[brms:bayes_factor.brmsfit]{bayes_factor} } } brms/man/brmsfit_needs_refit.Rd0000644000175000017500000000305514111751667016442 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{brmsfit_needs_refit} \alias{brmsfit_needs_refit} \title{Check if cached fit can be used.} \usage{ brmsfit_needs_refit( fit, sdata = NULL, scode = NULL, data = NULL, algorithm = NULL, silent = FALSE, verbose = FALSE ) } \arguments{ \item{fit}{Old \code{brmsfit} object (e.g., loaded from file).} \item{sdata}{New Stan data (result of a call to \code{\link{make_standata}}). Pass \code{NULL} to avoid this data check.} \item{scode}{New Stan code (result of a call to \code{\link{make_stancode}}). Pass \code{NULL} to avoid this code check.} \item{data}{New data to check consistency of factor level names. Pass \code{NULL} to avoid this data check.} \item{algorithm}{New algorithm. Pass \code{NULL} to avoid algorithm check.} \item{silent}{Logical. If \code{TRUE}, no messages will be given.} \item{verbose}{Logical. If \code{TRUE} detailed report of the differences is printed to the console.} } \value{ A boolean indicating whether a refit is needed. } \description{ Checks whether a given cached fit can be used without refitting when \code{file_refit = "on_change"} is used. This function is internal and exposed only to facilitate debugging problems with cached fits. The function may change or be removed in future versions and scripts should not use it. } \details{ Use with \code{verbose = TRUE} to get additional info on how the stored fit differs from the given data and code. } \keyword{internal} brms/man/data_response.Rd0000644000175000017500000000101413701270367015242 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-response.R \name{data_response} \alias{data_response} \title{Prepare Response Data} \usage{ data_response(x, ...) } \arguments{ \item{x}{An \R object.} \item{...}{Further arguments passed to or from other methods.} } \value{ A named list of data related to response variables. } \description{ Prepare data related to response variables in \pkg{brms}. Only exported for use in package development. } \keyword{internal} brms/man/theme_default.Rd0000644000175000017500000000101013712504452015214 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{theme_default} \alias{theme_default} \title{Default \pkg{bayesplot} Theme for \pkg{ggplot2} Graphics} \arguments{ \item{base_size}{base font size} \item{base_family}{base font family} } \value{ A \code{theme} object used in \pkg{ggplot2} graphics. } \description{ This theme is imported from the \pkg{bayesplot} package. See \code{\link[bayesplot:theme_default]{theme_default}} for a complete documentation.
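A minimal usage sketch (assuming \pkg{ggplot2} is installed): the theme can be set globally so that all subsequent \pkg{ggplot2}-based plots, including those produced by \pkg{brms}, use it.

library(brms)
# apply the default brms/bayesplot theme to all following ggplot2 graphics
ggplot2::theme_set(theme_default())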
} brms/man/save_pars.Rd0000644000175000017500000000404314111751667014406 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/exclude_pars.R \name{save_pars} \alias{save_pars} \title{Control Saving of Parameter Draws} \usage{ save_pars(group = TRUE, latent = FALSE, all = FALSE, manual = NULL) } \arguments{ \item{group}{A flag to indicate if group-level coefficients for each level of the grouping factors should be saved (default is \code{TRUE}). Set to \code{FALSE} to save memory. Alternatively, \code{group} may also be a character vector naming the grouping factors for which to save draws of coefficients.} \item{latent}{A flag to indicate if draws of latent variables obtained by using \code{me} and \code{mi} terms should be saved (default is \code{FALSE}). Saving these draws allows methods such as \code{posterior_predict} to make better use of the latent variables, but leads to very large \R objects even for models of moderate size and complexity. Alternatively, \code{latent} may also be a character vector naming the latent variables for which to save draws.} \item{all}{A flag to indicate if draws of all variables defined in Stan's \code{parameters} block should be saved (default is \code{FALSE}). Saving these draws is required in order to apply certain methods such as \code{bridge_sampler} and \code{bayes_factor}.} \item{manual}{A character vector naming Stan variable names which should be saved. These names should match the variable names inside the Stan code before renaming. This feature is meant for power users only and will rarely be useful outside of very special cases.} } \value{ A list of class \code{"save_pars"}. } \description{ Control which (draws of) parameters should be saved in a \pkg{brms} model. The output of this function is meant for use in the \code{save_pars} argument of \code{\link{brm}}. } \examples{ \dontrun{ # don't store group-level coefficients fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), save_pars = save_pars(group = FALSE)) variables(fit) } } brms/man/VarCorr.brmsfit.Rd0000644000175000017500000000337313701270367015450 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-methods.R \name{VarCorr.brmsfit} \alias{VarCorr.brmsfit} \alias{VarCorr} \title{Extract Variance and Correlation Components} \usage{ \method{VarCorr}{brmsfit}( x, sigma = 1, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ... ) } \arguments{ \item{x}{An object of class \code{brmsfit}.} \item{sigma}{Ignored (included for compatibility with \code{\link[nlme:VarCorr]{VarCorr}}).} \item{summary}{Should summary statistics be returned instead of the raw values? Default is \code{TRUE}.} \item{robust}{If \code{FALSE} (the default), the mean is used as the measure of central tendency and the standard deviation as the measure of variability. If \code{TRUE}, the median and the median absolute deviation (MAD) are applied instead. Only used if \code{summary} is \code{TRUE}.} \item{probs}{The percentiles to be computed by the \code{quantile} function. Only used if \code{summary} is \code{TRUE}.} \item{...}{Currently ignored.} } \value{ A list of lists (one per grouping factor), each with three elements: a matrix containing the standard deviations, an array containing the correlation matrix, and an array containing the covariance matrix with variances on the diagonal.
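For example, for a hypothetical grouping factor named \code{g}, the matrix of group-level standard deviations can be extracted via \code{VarCorr(fit)$g$sd}.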
} \description{ This function calculates the estimated standard deviations, correlations and covariances of the group-level terms in a multilevel model of class \code{brmsfit}. For linear models, the residual standard deviations, correlations and covariances are also returned. } \examples{ \dontrun{ fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), data = epilepsy, family = gaussian(), chains = 2) VarCorr(fit) } } brms/man/MultiNormal.Rd0000644000175000017500000000215614111751667014671 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{MultiNormal} \alias{MultiNormal} \alias{dmulti_normal} \alias{rmulti_normal} \title{The Multivariate Normal Distribution} \usage{ dmulti_normal(x, mu, Sigma, log = FALSE, check = FALSE) rmulti_normal(n, mu, Sigma, check = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mu}{Mean vector with length equal to the number of dimensions.} \item{Sigma}{Covariance matrix.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{check}{Logical; Indicates whether several input checks should be performed. Defaults to \code{FALSE} to improve efficiency.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density function and random generation for the multivariate normal distribution with mean vector \code{mu} and covariance matrix \code{Sigma}. } \details{ See the Stan user's manual \url{https://mc-stan.org/documentation/} for details on the parameterization } brms/man/is.brmsformula.Rd0000644000175000017500000000052413661463272015367 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsformula.R \name{is.brmsformula} \alias{is.brmsformula} \title{Checks if argument is a \code{brmsformula} object} \usage{ is.brmsformula(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsformula} object } brms/man/compare_ic.Rd0000644000175000017500000000313113701270367014516 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loo.R \name{compare_ic} \alias{compare_ic} \title{Compare Information Criteria of Different Models} \usage{ compare_ic(..., x = NULL, ic = c("loo", "waic", "kfold")) } \arguments{ \item{...}{At least two objects returned by \code{\link{waic}} or \code{\link{loo}}. Alternatively, \code{brmsfit} objects with information criteria precomputed via \code{\link{add_ic}} may be passed, as well.} \item{x}{A \code{list} containing the same types of objects as can be passed via \code{...}.} \item{ic}{The name of the information criterion to be extracted from \code{brmsfit} objects. Ignored if information criterion objects are only passed directly.} } \value{ An object of class \code{iclist}. } \description{ Compare information criteria of different models fitted with \code{\link{waic}} or \code{\link{loo}}. Deprecated and will be removed in the future. Please use \code{\link{loo_compare}} instead. } \details{ See \code{\link{loo_compare}} for the recommended way of comparing models with the \pkg{loo} package. 
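As a minimal sketch of this replacement workflow (using, for instance, the fitted models \code{fit1} and \code{fit2} from the example below), the criterion is first stored in the fitted objects and the models are then compared:
\preformatted{fit1 <- add_criterion(fit1, "waic")
fit2 <- add_criterion(fit2, "waic")
loo_compare(fit1, fit2, criterion = "waic")}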
} \examples{ \dontrun{ # model with population-level effects only fit1 <- brm(rating ~ treat + period + carry, data = inhaler) waic1 <- waic(fit1) # model with an additional varying intercept for subjects fit2 <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler) waic2 <- waic(fit2) # compare both models compare_ic(waic1, waic2) } } \seealso{ \code{\link{loo}}, \code{\link{loo_compare}} \code{\link{add_criterion}} } brms/man/InvGaussian.Rd0000644000175000017500000000213714111751667014654 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributions.R \name{InvGaussian} \alias{InvGaussian} \alias{dinv_gaussian} \alias{pinv_gaussian} \alias{rinv_gaussian} \title{The Inverse Gaussian Distribution} \usage{ dinv_gaussian(x, mu = 1, shape = 1, log = FALSE) pinv_gaussian(q, mu = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) rinv_gaussian(n, mu = 1, shape = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{mu}{Vector of locations.} \item{shape}{Vector of shapes.} \item{log}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{lower.tail}{Logical; If \code{TRUE} (default), return P(X <= x). Else, return P(X > x) .} \item{log.p}{Logical; If \code{TRUE}, values are returned on the log scale.} \item{n}{Number of draws to sample from the distribution.} } \description{ Density, distribution function, and random generation for the inverse Gaussian distribution with location \code{mu}, and shape \code{shape}. } \details{ See \code{vignette("brms_families")} for details on the parameterization. } brms/man/posterior_average.brmsfit.Rd0000644000175000017500000000643314111751667017615 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_weights.R \name{posterior_average.brmsfit} \alias{posterior_average.brmsfit} \alias{posterior_average} \title{Posterior draws of parameters averaged across models} \usage{ \method{posterior_average}{brmsfit}( x, ..., variable = NULL, pars = NULL, weights = "stacking", ndraws = NULL, nsamples = NULL, missing = NULL, model_names = NULL, control = list(), seed = NULL ) posterior_average(x, ...) } \arguments{ \item{x}{A \code{brmsfit} object.} \item{...}{More \code{brmsfit} objects or further arguments passed to the underlying post-processing functions. In particular, see \code{\link{prepare_predictions}} for further supported arguments.} \item{variable}{Names of variables (parameters) for which to average across models. Only those variables can be averaged that appear in every model. Defaults to all overlapping variables.} \item{pars}{Deprecated alias of \code{variable}.} \item{weights}{Name of the criterion to compute weights from. Should be one of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current default), or \code{"bma"}, \code{"pseudobma"}, For the former three options, Akaike weights will be computed based on the information criterion values returned by the respective methods. For \code{"stacking"} and \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be used to compute Bayesian model averaging weights based on log marginal likelihood values (make sure to specify reasonable priors in this case). 
For some methods, \code{weights} may also be a numeric vector of pre-specified weights.} \item{ndraws}{Total number of posterior draws to use.} \item{nsamples}{Deprecated alias of \code{ndraws}.} \item{missing}{An optional numeric value or a named list of numeric values to use if a model does not contain a variable for which posterior draws should be averaged. Defaults to \code{NULL}, in which case only those variables can be averaged that are present in all of the models.} \item{model_names}{If \code{NULL} (the default) will use model names derived from deparsing the call. Otherwise will use the passed values as model names.} \item{control}{Optional \code{list} of further arguments passed to the function specified in \code{weights}.} \item{seed}{A single numeric value passed to \code{\link{set.seed}} to make results reproducible.} } \value{ A \code{data.frame} of posterior draws. } \description{ Extract posterior draws of parameters averaged across models. Weighting can be done in various ways, for instance using Akaike weights based on information criteria or marginal likelihoods. } \details{ Weights are computed with the \code{\link{model_weights}} method. } \examples{ \dontrun{ # model with 'treat' as predictor fit1 <- brm(rating ~ treat + period + carry, data = inhaler) summary(fit1) # model without 'treat' as predictor fit2 <- brm(rating ~ period + carry, data = inhaler) summary(fit2) # compute model-averaged posteriors of overlapping parameters posterior_average(fit1, fit2, weights = "waic") } } \seealso{ \code{\link{model_weights}}, \code{\link{pp_average}} } brms/man/opencl.Rd0000644000175000017500000000246514111751667013711 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backends.R \name{opencl} \alias{opencl} \title{GPU support in Stan via OpenCL} \usage{ opencl(ids = NULL) } \arguments{ \item{ids}{(integer vector of length 2) The platform and device IDs of the OpenCL device to use for fitting. If you don't know the IDs of your OpenCL device, \code{c(0,0)} is most likely what you need.} } \value{ A \code{brmsopencl} object which can be passed to the \code{opencl} argument of \code{brm} and related functions. } \description{ Use OpenCL for GPU support in \pkg{Stan} via the \pkg{brms} interface. Only some \pkg{Stan} functions can be run on a GPU at this point and so a lot of \pkg{brms} models won't benefit from OpenCL for now. } \details{ For more details on OpenCL in \pkg{Stan}, check out \url{https://mc-stan.org/docs/2_26/cmdstan-guide/parallelization.html#opencl} as well as \url{https://mc-stan.org/docs/2_26/stan-users-guide/opencl.html}. } \examples{ \dontrun{ # this model just serves as an illustration # OpenCL may not actually speed things up here fit <- brm(count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 2, cores = 2, opencl = opencl(c(0, 0)), backend = "cmdstanr") summary(fit) } } brms/man/density_ratio.Rd0000644000175000017500000000302514111751667015277 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hypothesis.R \name{density_ratio} \alias{density_ratio} \title{Compute Density Ratios} \usage{ density_ratio(x, y = NULL, point = 0, n = 4096, ...) } \arguments{ \item{x}{Vector of draws from the first distribution, usually the posterior distribution of the quantity of interest.} \item{y}{Optional vector of draws from the second distribution, usually the prior distribution of the quantity of interest. 
If \code{NULL} (the default), only the density of \code{x} will be evaluated.} \item{point}{Numeric values at which to evaluate and compare the densities. Defaults to \code{0}.} \item{n}{Single numeric value. Influences the accuracy of the density estimation. See \code{\link[stats:density]{density}} for details.} \item{...}{Further arguments passed to \code{\link[stats:density]{density}}.} } \value{ A vector of length equal to \code{length(point)}. If \code{y} is provided, the density ratio of \code{x} against \code{y} is returned. Else, only the density of \code{x} is returned. } \description{ Compute the ratio of two densities at given points based on draws of the corresponding distributions. } \details{ In order to achieve sufficient accuracy in the density estimation, more draws than usual are required. That is you may need an effective sample size of 10,000 or more to reliably estimate the densities. } \examples{ x <- rnorm(10000) y <- rnorm(10000, mean = 1) density_ratio(x, y, point = c(0, 1)) } brms/man/standata.brmsfit.Rd0000644000175000017500000000325213701270370015657 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_standata.R \name{standata.brmsfit} \alias{standata.brmsfit} \alias{standata} \title{Extract data passed to Stan} \usage{ \method{standata}{brmsfit}( object, newdata = NULL, re_formula = NULL, newdata2 = NULL, new_objects = NULL, incl_autocor = TRUE, ... ) standata(object, ...) } \arguments{ \item{object}{An object of class \code{brmsfit}.} \item{newdata}{An optional data.frame for which to evaluate predictions. If \code{NULL} (default), the original data of the model is used. \code{NA} values within factors are interpreted as if all dummy variables of this factor are zero. This allows, for instance, to make predictions of the grand mean when using sum coding.} \item{re_formula}{formula containing group-level effects to be considered in the prediction. If \code{NULL} (default), include all group-level effects; if \code{NA}, include no group-level effects.} \item{newdata2}{A named \code{list} of objects containing new data, which cannot be passed via argument \code{newdata}. Required for some objects used in autocorrelation structures, or \code{\link{stanvars}}.} \item{new_objects}{Deprecated alias of \code{newdata2}.} \item{incl_autocor}{A flag indicating if correlation structures originally specified via \code{autocor} should be included in the predictions. Defaults to \code{TRUE}.} \item{...}{More arguments passed to \code{\link{make_standata}} and \code{\link{validate_newdata}}.} } \value{ A named list containing the data originally passed to Stan. } \description{ Extract all data that was used by Stan to fit the model. } brms/man/as.mcmc.brmsfit.Rd0000644000175000017500000000254013701270367015406 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{as.mcmc.brmsfit} \alias{as.mcmc.brmsfit} \alias{as.mcmc} \title{Extract posterior samples for use with the \pkg{coda} package} \usage{ \method{as.mcmc}{brmsfit}( x, pars = NA, fixed = FALSE, combine_chains = FALSE, inc_warmup = FALSE, ... ) } \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}} \item{pars}{Names of parameters for which posterior samples should be returned, as given by a character vector or regular expressions. 
By default, all posterior samples of all parameters are extracted.} \item{fixed}{Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE}.} \item{combine_chains}{Indicates whether chains should be combined.} \item{inc_warmup}{Indicates if the warmup samples should be included. Default is \code{FALSE}. Warmup samples are used to tune the parameters of the sampling algorithm and should not be analyzed.} \item{...}{currently unused} } \value{ If \code{combine_chains = TRUE} an \code{mcmc} object is returned. If \code{combine_chains = FALSE} an \code{mcmc.list} object is returned. } \description{ Extract posterior samples for use with the \pkg{coda} package } brms/man/horseshoe.Rd0000644000175000017500000001257314050266727014431 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{horseshoe} \alias{horseshoe} \title{Regularized horseshoe priors in \pkg{brms}} \usage{ horseshoe( df = 1, scale_global = 1, df_global = 1, scale_slab = 2, df_slab = 4, par_ratio = NULL, autoscale = TRUE ) } \arguments{ \item{df}{Degrees of freedom of student-t prior of the local shrinkage parameters. Defaults to \code{1}.} \item{scale_global}{Scale of the student-t prior of the global shrinkage parameter. Defaults to \code{1}. In linear models, \code{scale_global} will internally be multiplied by the residual standard deviation parameter \code{sigma}.} \item{df_global}{Degrees of freedom of student-t prior of the global shrinkage parameter. Defaults to \code{1}. If \code{df_global} is greater \code{1}, the shape of the prior will no longer resemble a horseshoe and it may be more appropriately called an hierarchical shrinkage prior in this case.} \item{scale_slab}{Scale of the student-t prior of the regularization parameter. Defaults to \code{2}. The original unregularized horseshoe prior is obtained by setting \code{scale_slab} to infinite, which we can approximate in practice by setting it to a very large real value.} \item{df_slab}{Degrees of freedom of the student-t prior of the regularization parameter. Defaults to \code{4}.} \item{par_ratio}{Ratio of the expected number of non-zero coefficients to the expected number of zero coefficients. If specified, \code{scale_global} is ignored and internally computed as \code{par_ratio / sqrt(N)}, where \code{N} is the total number of observations in the data.} \item{autoscale}{Logical; indicating whether the horseshoe prior should be scaled using the residual standard deviation \code{sigma} if possible and sensible (defaults to \code{TRUE}). Autoscaling is not applied for distributional parameters or when the model does not contain the parameter \code{sigma}.} } \value{ A character string obtained by \code{match.call()} with additional arguments. } \description{ Function used to set up regularized horseshoe priors and related hierarchical shrinkage priors for population-level effects in \pkg{brms}. The function does not evaluate its arguments -- it exists purely to help set up the model. } \details{ The horseshoe prior is a special shrinkage prior initially proposed by Carvalho et al. (2009). It is symmetric around zero with fat tails and an infinitely large spike at zero. This makes it ideal for sparse models that have many regression coefficients, although only a minority of them is non-zero. 
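In practice, a minimal sketch of its use (assuming a hypothetical data frame \code{dat} with response \code{y} and predictors \code{x1} to \code{x3}) is to pass it to the \code{prior} argument of \code{\link{brm}}:
\preformatted{fit <- brm(y ~ x1 + x2 + x3, data = dat,
           prior = prior(horseshoe(df = 1, par_ratio = 0.1), class = b))}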
The horseshoe prior can be applied on all population-level effects at once (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. The \code{1} implies that the student-t prior of the local shrinkage parameters has 1 degree of freedom. This may, however, lead to an increased number of divergent transitions in \pkg{Stan}. Accordingly, increasing the degrees of freedom to slightly higher values (e.g., \code{3}) may often be a better option, although the prior no longer resembles a horseshoe in this case. Further, the scale of the global shrinkage parameter plays an important role in the amount of shrinkage applied. It defaults to \code{1}, but this may result in too little shrinkage (Piironen & Vehtari, 2016). It is thus possible to change the scale using argument \code{scale_global} of the horseshoe prior, for instance \code{horseshoe(1, scale_global = 0.5)}. In linear models, \code{scale_global} will internally be multiplied by the residual standard deviation parameter \code{sigma}. See Piironen and Vehtari (2016) for recommendations on how to properly set the global scale. The degrees of freedom of the global shrinkage prior may also be adjusted via argument \code{df_global}. Piironen and Vehtari (2017) recommend specifying the ratio of the expected number of non-zero coefficients to the expected number of zero coefficients, \code{par_ratio}, rather than \code{scale_global} directly. As proposed by Piironen and Vehtari (2017), an additional regularization is applied that only affects non-zero coefficients. The amount of regularization can be controlled via \code{scale_slab} and \code{df_slab}. To make sure that shrinkage can equally affect all coefficients, predictors should be on the same scale. Generally, models with horseshoe priors are more likely than other models to have divergent transitions, so that increasing \code{adapt_delta} from \code{0.8} to values closer to \code{1} will often be necessary. See the documentation of \code{\link{brm}} for instructions on how to increase \code{adapt_delta}. } \examples{ set_prior(horseshoe(df = 3, par_ratio = 0.1)) } \references{ Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009). Handling sparsity via the horseshoe. In International Conference on Artificial Intelligence and Statistics (pp. 73-80). Piironen J. & Vehtari A. (2016). On the Hyperprior Choice for the Global Shrinkage Parameter in the Horseshoe Prior. \url{https://arxiv.org/pdf/1610.05559v1.pdf} Piironen, J., and Vehtari, A. (2017). Sparsity information and regularization in the horseshoe and other shrinkage priors. \url{https://arxiv.org/abs/1707.01694} } \seealso{ \code{\link{set_prior}} } brms/man/get_prior.Rd0000644000175000017500000000760614105230573014414 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/priors.R \name{get_prior} \alias{get_prior} \title{Overview on Priors for \pkg{brms} Models} \usage{ get_prior( formula, data, family = gaussian(), autocor = NULL, data2 = NULL, knots = NULL, sparse = NULL, ... ) } \arguments{ \item{formula}{An object of class \code{\link[stats:formula]{formula}}, \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can be coerced to those classes): A symbolic description of the model to be fitted.
The details of model specification are explained in \code{\link{brmsformula}}.} \item{data}{An object of class \code{data.frame} (or one that can be coerced to that class) containing data of all variables used in the model.} \item{family}{A description of the response distribution and link function to be used in the model. This can be a family function, a call to a family function or a character string naming the family. Every family function has a \code{link} argument allowing to specify the link function to be applied on the response variable. If not specified, default links are used. For details of supported families see \code{\link{brmsfamily}}. By default, a linear \code{gaussian} model is applied. In multivariate models, \code{family} might also be a list of families.} \item{autocor}{(Deprecated) An optional \code{\link{cor_brms}} object describing the correlation structure within the response variable (i.e., the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for a description of the available correlation structures. Defaults to \code{NULL}, corresponding to no correlations. In multivariate models, \code{autocor} might also be a list of autocorrelation structures. It is now recommend to specify autocorrelation terms directly within \code{formula}. See \code{\link{brmsformula}} for more details.} \item{data2}{A named \code{list} of objects containing data, which cannot be passed via argument \code{data}. Required for some objects used in autocorrelation structures to specify dependency structures as well as for within-group covariance matrices.} \item{knots}{Optional list containing user specified knot values to be used for basis construction of smoothing terms. See \code{\link[mgcv:gamm]{gamm}} for more details.} \item{sparse}{(Deprecated) Logical; indicates whether the population-level design matrices should be treated as sparse (defaults to \code{FALSE}). For design matrices with many zeros, this can considerably reduce required memory. Sampling speed is currently not improved or even slightly decreased. It is now recommended to use the \code{sparse} argument of \code{\link{brmsformula}} and related functions.} \item{...}{Other arguments for internal usage only.} } \value{ A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group} and several rows, each providing information on a parameter (or parameter class) on which priors can be specified. The prior column is empty except for internal default priors. } \description{ Get information on all parameters (and parameter classes) for which priors may be specified including default priors. 
} \examples{ ## get all parameters and parameter classes to define priors on (prior <- get_prior(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson())) ## define a prior on all population-level effects at once prior$prior[1] <- "normal(0,10)" ## define a specific prior on the population-level effect of Trt prior$prior[5] <- "student_t(10, 0, 5)" ## verify that the priors indeed found their way into Stan's model code make_stancode(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = prior) } \seealso{ \code{\link{set_prior}} } brms/man/figures/0000755000175000017500000000000014111751667013577 5ustar nileshnileshbrms/man/figures/README-conditional_effects-1.png [binary PNG image data omitted]
zt2L¾xYIqAsb?l_3bw끳1s+WAŮmZ􇕻OA_LӀsxH`9pO_Can5 j.͠gڪ' UX;i\}2Ə A|g2xEnE٬Z;),V%sNLbALpCfX3j8w5O+~WϪg}1~X%L]PSyL1|/cʽ atC=؟{9ROLl;-!/aKH> `<` 4u-7%ʽNiܻ ;)x+֑|1c^"Qs.ȇ} hLOSq#cʽ-p+5o P;)7Ŵ0o܋|F dS |1J7(`-Nczʽ,a؈#~ܔgw3VE`ܗyBwS o0{V,sQ?|}1K"{/Y.+q5Jy9NZx "j9UX\oӶwa^2[xmoG!F@ǘ,٤׆2O2X{Lã)A¿6ҲwrdgK?%F#c]JF>;H9rϓJ?#ti;/evyʁ{4Qs%AFb_ .YB*2wc K ^;Kri*oC}1@J;-ߙ 0=Q=S8NRJܳZRGӠ_.[|s~5wS JBja킾 ˘ʎ7՞6rfjߣOASdb1E 8y)PF҄we߁ʑ{-aї1hnY@ʽG1a8wc Jл,Exq@f_VsaLy%p",CþYTFnwc r =U[(H_,N?+LpleK鲑;ɕt\/;}g1&V.NpTi/}2W徘 z+YɒdFɒzd˽ ^ r,C<{hyt$CʶR}&{)R{)-`ʲQ} VĘUnw*{9+EԾW¦mDe_鑲*&j&` J5Kgw Sʦܛ -=;r\Ȕ(&j?s^KY;)M%_¿aʚMޕ )|wSvv 9A;[Y;)?%9r^#ࣾ@,]NߙLy+ro?@;)i1"ДܴLfnrdP%Xs(%5roS5sJE2^n1Ţdʽ+\O 7oV.܂9/E)*%Q~ <5}" 9~wc˽F)&̞*"ܔ.%AQd mĉ_1( $W69I1ٗ۩{AP!Ug[S2r_ȸG,003.;1&rA5 Z[hL}15O89}4 5U|'1GNK}؍)/9-SypZ.a0B"Fq xN\t뮐z˘ÑY{sO;hnLƏ Vӕ|bYモc= [܊"&p 4a/71 b؍)5xSLSXHwyܛȋ@U6kЖ&u-y4,er$S''L&o+Hl;#wV7؍g9;U0i S#'!> |*[6rof\<?dϫevA)TiB2k$~*?+/z1ٷqnT 2 =϶vO][V-e쨈j`@6gzF3 9zk|g1X)d"]8&B8ɄY=&(Vy ؍f L z2ȉ'v]qx pe9Xhg7f?ׁ͋la\v(o#y/Vy'r{)w9$`*N2S+'r4/2zYҟ+H35O`F5&sSZjXOG+i2_#b3e­L<"E<]$EO9`-x]Eh]L^o ˜@j'(LKΡSA|BKĻ1sKЇzg;}12ۀ'+:ݡ')LVuG`j=ˋɻ$-T pCNQS]T[?^}'(7&(-3K"_ie&?1G"'55'_{DVA+)\Z]U]yBJ ~ړq rES>lV|*=NHftɚ*7.zX=ZD8Vlk> Cꩂ:8;) NՇ U4p{a۽~\n68Ȕ%Nu]#1\;y͵ԥb*SaG~ȅq{&kuϋLo88d3ɂW}M\Vy߳bYc# z:l8͘Cp!ɥa]!kLOll}(UK86">8]f88"3zOMqpn˽R&IoRHe5h!JR&W=[3߹ɂeF6m;t;rBA$sh 9pTzZÖωHW 6hJKcC}ae%V55ss 3dގw)u̦.%YOd?P-vxΑmph66*7H߬ Z|qȟ&jM4y"i;wBڣ8`&pv1HN͚nF\2Jpe|TQ{<{X87A'eLq.Kn޴ V!e9(3Ag>ksw$g*6Le2|#m#W.5g8$ru9c~;D?WթzUvB$:̍?}ș 5Cs@,!zKqtu;ZE.>v#azblbɳCh=<#?QB + \1>}`;U]oorF&VˠԞ -L˸JTo9vO >ҮɻSW\8G!` \'.慃س}ޠQc;Bڈ.$S#!=g&`TOjuW>fX|a_uLz7:.ΊǎQ#ԖBm)}LxE< zZ>s~c撷"8o8* 83d1Y9z6}0SP"~c_N.ʘqQ`+yc"!5sorדRږ_]~q+UAѱcZ2Jޘ~C'*3 88jB{*zx`.ΑmR2HLX'u{~ V<+EGhn%(/c␩Q@13ݞLF>zn7=η3zsnv\]ld+$!p|r|ƌ) `o7E\½ 4(dBߚ81.wE&o)cB-scae~*m[Dy0ˆcc 6ecʀ1j2Vd]wRuOJݛ\2WrK'Ȉq(({p)aw 2&ϡe{j{ڵo5 q k*:88ҾMQdr,%Ikw?t^n+a vQ ##jqH͛1]a(5M+ { >w s8`R؍v;Miџ*;9m]<FK<-ܘ;YȸG'dҗ]bJcj0(և<~d^ҒyKAĈD8&maSƅ [=/Vw]Edt8P+wpVg :*Xś%_R, eN r/E\Qh?5%V0Q|RÉMQゎ .`x,XVfˎ"i)PF694/\|YivT8W]sA !S P Δ=ӵ<qVgtӣЖ^[nHžw sU*483p^N{8HIKvb'^q / !τ123  A6;W|1sd831]Ɍ(dXsx;*:Xk-EB`&;*T8. 
&R67ފNBlRxw]Bf)qzgA Agĺx$"R P`j”b^ bۍ-f?/u}_#e{9%dsed 'D!6Ɓ-4X'>;͡WrJ=mSGM.aetM" wWtp~Gm%MQ, Ч|1:y]5U8!VE'ɲ jS3f(mABKK솗25@:*f_TbD`<@Eiq Ͷ /;hnQLAT8Mԧ+9% 9v,§"kX ]7)V\QACΐV%o(:mGҢ E 6r76H)qȔ^vѝ0IDAT .Rgv|G1r+Y0~RLҢuYԊb&{jTbRccbeKX^%0fdaqJne2WgD&Hhu1ĊH)ʃN.IWlrU>ØBLNx9Hhski+1Q cN陂tM,Ua^S(뜲&iu VE!+` zZ=K!Qg2JJu,]ɬt%ӣБV޼:{K$gO7Q 0<ɒK,c=|$OБb\bw5zHUgy]3!P;jbv&$]2g9.x޿٧yr7%Axqe%%촢Ϫ];dL 0:qNu+Niu-Ab ^kSiyv4N0"IqJ]f+M _ ҜzD^3S v42nEct>Bb}P˘0L9J.(d-{wn*\PצnBNBwc|쒁*^nh6Ӣ.aUb6gJQc)/Joc{ bd#&2,,FnØči¿Jճvd2x^GFgeLQ;岢 O.TOeŖ?xL61#j‚8Si Zu..7$2T;v8 A61+E湱oHѐ(Ű[q3Dݦ;*~ R=htܝ"ywci 'D'D] fK(7!$M+_QJ.B7D9cLu.fe&(i)xaz-)TN8g[zWS`|03EcΐE2ms4$Ċ!]lݘz%J.8,إ'U~^y>;C.9Id9]*U\@J=Az[N"`D9C}>ہrsHU 'DJ蘒&?͵ w}KNyc{cU>{97d71$1cgBbuM Wo|mlcYy `rE c鋁*{.cyCƷw}c7L~h7ЃuM> z\"cay*;wHJwmof@%h M_h]7ݘ4> :*CdItŞ+P[ \ϒhNA_AM85Q&jZKk]a+g olߴP_LjGi0onɴ 4e-1&%uTpNg\x Ck+vE?O;LCoN[~2q800"sT"EaCǺ!ȦdAOG從Q< `S)*!prrtH*]RqM?=@9џ]\Ý@0!*Q1:ݛb,2 Y =|u/'&E>Nk]#-}Cw;w!Yzp=MAy8O1S4'Oy8)L߭urG]flnLaFZ>| =-v` 1&T`A@2bM[0Y 7/CAz:cLN$@Sۥ=6S}"bh7/c1~%W+ <+I "z~cX k1/z^ neq+3^(6Tŋ޼3_cCPޓbpDƊ`}|zy0ч17t~-ٖC E_^rn}}1[OhW='3ۚyBo19w_ 4rPGZ=?>Rws֭Դ>z_ OY+D3V8t`}}u:kzc \=`bm_YQ{AgJ_D2vTDq)#i&%[RWǸ )Y ;#YM X<Ê|2"_OB*v +B"ءs{"G3ۂڲHc*l<;̡dls=AۋYK{cJD$$pˌ@Յ\ U؞C[/#0_cSYp pͪuMԾ'KlHS6*̇y_¦T Ɓ2rv4oBЛҶ<|3ɍ}g1;V5S}"1)iл/f߁jW`,WO뙽yRw6rHo ,1$ p.Re߁Vj @;9l;dyǰ.ެ7r@5QȧJy!@yEy\cr(^'pk#k}ʆLJrC?pnVG%dqLYwlzԷZqs@coLYwl[L }حԋm Łs2r̕_\Ø hSi!Sx;Dq_s:mL!ɃaZ)yw ڜ.߁r)˄K}?h=P;ɪ6\ >nstqk^Ϩ?^ÝO!$ﳟ$R@Yyrk?>?Քd K}?S4_ne[@;-FZs~g.4QspIg*;,ͮq@0p* odA@8SΖB>NpwֳY߁e)+#җ]^A६DDNfjyN㝂> !s=XHC\;CFZ)_kgvzmGeB5^S.s@13tu;9M959.p @ܗd` \͡\\Oy>gپ3%)NEt$ֳv@LxE%3L(H{}޼]P[@>od2S-LqoF \Z:@-M):?.7Qn|Vpp#>3=AV*:T%(:[tY@l_g2-GĖy!w[PM30Uq 0))&d[@Q |+w2g"] ^;!H) \( b7י,R}gDR;sdLѻqnK4!""A.dJZ;i":R80w".Kp*S9 Id&91QXƔ0J|,AR@O(,sݻH[.oo#zp>0^2@}o; y{H];Or)d΂|AO@މr@[<~20SLY v(ٴ\;Au[9@c:oȧhSD/{.p"h=ɩn8Aꀩsiw(=яU) /)Kۮ|?0iBBtVW}yNeܱBw{6v5f AuJR'TcKam+QlȻOf98vM|g+~ZNƟ )rt{j1EqWLӺcKymS^Νͪud{7v/?k9rcJIK@xlV$ܻ۬dZq)r}VN{e]pϡe$PN}m+Ӏscf% Xw\)r_DI7KɌk}g2xd@Rc_Qs N#B|lZ7kw|hdwŸ|g1xs_;Y[e2-R&WFo.W^_9ޭvoP63ƔE!o+]av&Y-3crGJG[VʲwkKW)ƔLė/E^!诀c]};Oe_ >! )vil~A|rLxyQY;H!(u=QOۚv4b-Czk2imT+czJ{1\B+9&jjLڮiY;H!rBj:t0wc>^ppZPٴL7.ieY|Y1{4gXwF=DA Ř2+GiyL7{g+fwcʉB$idrYMB=kS.Jd+ޱ{d6 `<Ɣ]|Ra1G'}ifbLy( yl<;Hi~Cˊ!wcJXܳd5 ;1E5z FYHCh?i'[gs 3Z4Y)pwXrR1ǘ +|g.ľÔ"+~Y) H>lLs=O~ A?_Oo})VyC;IY=jLإ*:b\X{̸Z} ]؟)iꕾ+R}f(:Y.}TOF=S&)7&M['>L/^YTB>%9aK$?gNa{Y!~8wc' I;1[|1r/PVm}s-cd^S;9gv+m89{[1[BywO<۫^~y4vq>>̮reoh3q{6s +2邼O@<&o^+zs#m 4z >]]r們w@TUsKo D7~1ʝ{jv8wu nZִˡjWtSw.$qoX

Vf>?+}piܱ++G0YS 4^~әnD r{gC8F05M`U~k'yZ&>00<)Ky#bڴkKup\!S9 Hq;<,VfV14wC8y +Vp{.fzUU ޠ*X|X5j9W/ׯyAEy qK鱽oA )9 3"2p_}x}qOq$sCuO+o}LJNDÖ+w'F CdFg98ɶ)fAy6K|Yk++G 7*=`GPy7/z\lerj5*0MHLf|gˣx^yOͦe 9pQEeˍ#N ԝae4`F,\|Y[jd (0O1.`+dUׯW%z<{5wzΗs6lS]os$7^%LI4!@&&1F(:BHō t3f 3c}f]yJX9Q4Gn@~NERCI;D8'WݴsByK)j0~D%P! d޹EQpm,Fi*RڂX U`$j2G1l?Πɡ"^@p_zNƙa$FF:u5ȏ2IENDB`brms/man/figures/README-plot-1.png0000644000175000017500000003363314111751667016364 0ustar nileshnileshPNG  IHDRX P"PLTE:f:f:[ffK9lHqHqHq333::::::::::f:::ff:f:f::HHHHHHHHHHqHHHqHqHHHMMMMMnMMMnnMnMnMdffff:f::f:ffffffffffnMMnMnnMnnMnnnnnnnnnqqHqHHqHqqqqqqqMMMnMnMnnnȎ:f:fې۶HHHHqqHqqݗnMnnnMnȫff::f۶qqHqȎMȎnȫnȫȎې:ېf۶f۶۶ݗHݗqݺqݺݗݺn䫎ȎȫfqȎې۶ݗݺ1 pHYs&? IDATxݏG}#g5! EI 4<7i!!XFvK[%4H&Ҙ@%sOTIݙ5;3;;=ϳ?n}ڠH4٘{ !C" pD(Q;$wHD!C" pD(Q;$wHD!C" pD(Q;$wHD ^{!O0,?)TOl9'o_aXm"T Jy}?&l_dOl.`SJp!&6}Kx%l*[y.[?⯳\\ҦaxG~ o*3?ΚYmMe+1ثFT;/j&[yǝ_rS l%u&j7U:]֦a=+^!p.1PGEl+10Xn7ļ~]=,To.jSJ00o=}I2̦W#l -ޓ[yƝ_`vBXeضDCpD(Q.^k)dt&u,w<Q;$wHD!C"1>̽/~Kb7lX9C,{g^ dr/{,}iރfڙbBf1pHD}Y6Y&wdgKhȽU/9Gdyeځ$.&:QȽ}I>"p_P!wb6N8r^?܁wо }D{h#0J>EݛZ܇v(wo܅̲kf&rQ^D})'Bw#wγ$7{mqOoM{r^ٲkz ﮹KpWܛ{sGK>jҩ]0UvT{##e/Lq9@j^v}eJF']]F=i%NjF(}/XGs_?UJon.Qz}N9Gػn^ B($fwX=>R9?鱣 ܫRvg}Z SWCimj5i6x^+AQٔ{8w٢2opWEe 0lVs_?_ιИ}I,CRVv7wϹ'.wOW=}lr's7<~ݐ>;{)]~TuV7Vyžh{Fts0Ι:93&ڃo;厵ܑwB|Z$"Έ4z>;?vÝԹ(gxIDǝn9܃܉{^=sg9b3$}:n;f޸ qNdQ7w޻N1ssqA qIGp'Yp{HGI{w W/8( A{5D׹;qG&܉;bL,Z19oGq'M'} k8ゃxnD=QpGZqzsϢ 8!]qƝIΝ>Wr{ ;ΕQ=+;1厁 amw=wN$ܑ;NJDn+ٜi\aqA//wĹ;Gp=n^Uŝ7d1Nu w ܧ`V=.8(J;nrGwRN2Č;#9wO:ǘ+ z\p NZI;Ą{d'bcn7õ{3$sjqi =1厚qΝHn!cٜL?2L+#~L'w⎚ p3TΝw";7E͸cw\95 7'* pgQ"rwk>]F HƝfQ sGry2R{ջ;ɸsbgy"Iq'"w̻ w{=" wRq'J[ y/7js')wNҸXw\Ȃ{Rp D=:wDH{ KEwP=)4YB>vGuI+2᢮nsg_ܯ;Wܱo^mCO=Œ=D˝}s+|i.rgWUr'MXɝCyMwɄʽq-`h*p'w"p'2XNdq(ѫ]#+ëܑ;Vq'-${>;); ;oaX6wRH;s}>ܑ;;_\xINNQq;~`w wv队vZzwϝD]xf|bM{ܱ;*jIY4"q?<}-wԗ{u$'{X^[&4܉; Ak6/;r'r]l^}-}}^ ћO=^JsnE;U?qUj`}p d 0B;񗻃\j[&T=w@Ͻjg/n#]VJEsNEi⎁Xw#νhjtsGyk p&]l˹Eu܅ ⎳ë"w jX%11NDž@'Cn;vD{ܱ);|ߢ{"n͝,;䎀8i.іs3p'5=0pGéލ7wT}+;)wU߃;2^]pgj˄]YNEx4w܇Ɲc{/P{(;6#9D ;eY9w ܇%@rr˶w ܇)0;厕iʛ8*[sG%w\{pG/}HPvn;oQ؈;nrG}HkzF;ƽ'%w 9[,p*lF;u=isO2cn=zqGw{ջ{ҟ;rrd&rG}{ pgcw\rj^4;ͽ;6i(N3I0ʝ=mvܳ4^ܩ;w)wZqGw3'F*qpo-;˿^9Q׹ /ìN;Ep?ϘʞV3&>|]Cv"ʸ35ŝ$;3<;ͻazrqW0Q[?*bƝfm wNݏL{KiF%wl{5)n,w{s{QpmTNܩ=).7w>Ōg~q:n<ܑ{b$W:sqGp}J3Vﲽmݮ*s-TNETzrG}p]|$>Qեs?<{`{KH{BqROM^୳>3 w-wN9wZr d7Һ\}S=to #/w*ryS]+Dw gb5e{K=NT˝Mg.|wL}[bŴ%yIEkŝz=J<ܫ%j`cߩ8<%wzS ' la"ޝ~i;{/Nܩw^}v䎺#;Uqmwj;q/f&κj 0p.;-trpG5b̡=4I7wɝ i;riŝ*Sn1;m><wu;wZqf8sE9T=_fԿiԸ8.=w-  <˝w#l}^qJӁ&k)pw2>Ef,wj=|0fĘWrI{- t8wa >:3sL)pwY{ZJSIL>s܁{-MH7 xϹϪ p]Y3ɉm2w fv~s/ފUYe?^5Snk_ޥ`c2˝ea܋po͙3x4x=Jc<q=lx̽S_y&r?{}V;ɣƓל{+s7z4xݻ] @Nvw>"5r{,FrZ,~ybnh Jg>"gh rz#wgQ=aܧޣi);ȹ7޶eX 62qb{O6G}n=wp/ߕ;{_Lovll*oWu3YM5T{QptՖɒr^NM:пeEng;/8z |&>q@ޣerֺu^Mr&~qr=Zuxθ腡h;;p>'6 O~{Xo!=x= rOt`2y~R}U д~xi,SU'˝ߪ. wdfz_^ k4G#ǻVK;P$4vy^.#N]:v܍[Ժ3p44GujYb8gʇ`Oј$Ss?,Yk^}¶{$i4O5 -ӱXUV+߃>?lo}R~Ȇ[K/u\^*634Nb4 ʪe˫tI>-.Q uNȝ}氮]^H=ʵgIkܿ^J#sGX/Պ4 5twH:S+)R㾣8UT:3/ڹzzo.in]tvOt1rs9|]ӢsGXEwWY.JF^e\@W Ȇ[rk&~nO0״~"ݛ~w;_h? ћO<٣\>)iθ+KI="iHAE6:˽RA EbMs"ξj;WEsfZEJ*ҡ֩ q;$wHD!-!  ߣ!C" pD(Q;$wHD!C" pD(Q;$wHD!C" p,$Ƒ_ p,#/eދC[xw5;dy!p,!w}jwȢrjwH4Ikwݾ*pxj;] #_ #;p,*/e(p,)o~!ɭe;d)}As C[7N%JpC" pD(Q;$wHD!C" pD(Q;$wHD!C" p,$xfcȇw ^{!ϲa~m Olj7;mfg_XE;f5ҷd? 
,y(K,g6},Qx`Ko*fUD`}dPG~P8xknۏon"NUQY:J_9ѨvnrS]cOƫiK+}[3,' kb]c䙍/#"hUo'WNKn{ϲmݛl;qfӼ#;2q1I:WζțsZw}Y/MesccO7սg5˛Ɇ5DzOjGٗu٫okfe{I Hh#s/x3G/޿ P.bϽX)soi/Ͻ`*si/Ͻln+wS&#L~m]yg/a(/kG֭d?{ɣss/x|ùϟ6a/{ȌKg<2)&Lez㣸oo{WL]7>tk3LvnNzw M6˟cSulң-Dp'Hʿ^j{h_^s&/k{f#L~ڗW?ֿcl?z|r{Y\?juG4}/z𦌍yQEIZ&P/{'\)&=H@¼wMʽ!&n>z'1I̸/;pZdmIGpC w2_n {A~Gp"w̒ v+' IDATWs1p= θnawqG}3dGuIށ}v߼(g}e:WU5iIDݼjWesG}3TSw~gs*}Dw~C w܇gϜ?^ %;e½f> fv =w.UǍK76w#}#>:C{!=;]]=3F{`OWBI% b*uX{&4wg#(^{H1+!j8 |J}`F܃nNjL`Gi|w{VZͫU7w,r+2r* S͹3eʈܓƝq/ww#Xr7;RqO{ ܭiLI'+޺ wpڗϝ4wZ1r_w#M pځ;;˝wXܗ=ɹc{ha[̹?;> VG=1u ᎋ@ A}r'r?w\qG# wKڣNTqOXi/;s'"w͝v'Zli;JP7w厲$I;!1=|dwńs'cνfM% G=) ܉!w➁Oׄ;Iwk-%xNؔ;;Ӆ4cF5w6[NIlj;)wE8㎸c$$㎀w!qgΤI!U1r'SqO~c{t=tc#w׸erהy3Gw4)w"N lr$PJBbYcwq'ø~m>p*w7 wq ~hܳa5=wCN1sw2;g}tqک;r' 8 $1p?lr_BNDwpgE #s8~R;о$Duܱ;ir'5{8b,Ը&9WY.k|g;pGIƝ}t>s!.hɝ ;&GSoJo/>M\h;wb;;' ]~Tu%4q=h Hΰ!RNrܡQwW!qϵyɍqgzqǹ_Rir'ܱ{i9aTqٻ;69w>np'=I~w$=nr'u]aWs'|O^M+mrgݔ'-Ɲ |<3y__Yhyx𾺯`tYpO\rGGB+ϧ/Q܉;1I,ϓ\Uc?*܇{B܉{ }෹?+G6 ˸u^C^y> ~^=׹{vb0"5U%pwXlvswW+55;Fv rS]^N{8wwޖɇjvU;c dwTN8;)ZcFGU]q' :ꜮwR^r.=X=.kINOÝԹ#sŌTNQk|gjˤ]?iH5w'zL;npoۖqmP7Mb{W7n-xv|RqΒ;pcna;;gf,Ss/2/Vks/u9掁8wX^ȝQm|3]6l wj?;{zso&klL˸1p }.I_ pWEʝr'ָ 묌']H1ɝԹc>0]GyK[l㎀AI|t2iO}w":np/`dM_Eobޏ;)#ܑ;wu܅qT=aH:Ȋ;*aW<%w3[{sǤ|_q/KiWP ܵ ;npOzq΢;ȄC4c wAH=GOWwߋ\eUn Ї;;2%wpd]{6pwN܅B31!rT{xG4QVJp)w\p}ƙ{p{'wN;1q{q;3ErG#>nL=,#9wNq=)c;SszS wsGrŝTܓ..{Uܓ{" cS'k5NJYg8ITܩ;r4N{WwcI{]lsG<3p9zrDϝf!N+4';npON ܓ;Lq{7w*8Id1pY]Q wZqC;ι{qOܱ+L pϽ;lF^´ĝvq ;qG!;ܓ;`߼Xr]NL4s;RrrZzw?\C%3V;?ȔPT׹Ӏ7cQ{(޻{~v rܑ6Ӝ;bi=r]}}e:%j+1EɝN;S>;ʸ1iq}Nlޛ,3~{Jmq9wN3TN3|l{:sm(\FYY#V^ irGw*rOi;ɝݰ73;7[$x.SQ!ΝS#4N,3k6V0/Vɬ.ឨӒ;q و5{q#fm]rڴ/Ь9w4S)wL˝w̯}=W]_srF/,Lջ~x\w۽Ny+;N͸ntqNn|>yIZswh̝ń;͋p1BD]ڽ~pJ1w=S ,\lRr-I*<{7V=zɽڀ*SKɹ~ܭ'WV[goƝZ=xv>J;UszW| Ғ;swjʝpuY۹͝JS;˽ٓݛ B7ܫg?󯄂;s;iz=9; ' IS}>;mS{5 >0^6:|epRvyw_-[3-y{xݳwr vϛ3V#YួRq&>p[w+^Wg'm+wވ;roN]Q͙36+woms抻zB*>ffjz#2;] Sc39rr6g}ά}m^.)ՙڽg㮟5[;}o]ீ:W6g\poMd{xSVjMdD}n{]ٸP|#2#- p pN;~Q{gBwyݿwi&ܻvw>"prǾy#2{ ܃w>"sP;i{we&v{Gdbjwڀ__YNU`G}* pWfRGC~yxo}|'z8{^r>"r?vwO]?UJon:}d0pɸ;k}m>V^m{HE`ǎwj]7kNKR~}eM2;3t<}Ds_L^ľw^+zRs_v젂Ϲ};"CS bۻ9q!w2θ]bŞ'Lל2󵞙4Omz>&8=2stWӺ)W>"W?5Q1st iž6on]~Tuu/ݣ74o hW^QPsC. oѭL5kg;/Vkc]y;6%AyA Vhʼ p.8i`|4o؏izt2"V&9ڗ~ `,X"V&b1n(nѾ<8 d1Ǹߝ%G,whWG Q 94OdCg\w=ݱ_,8e-QuI^/S#Q Ȇι;3׬?Ť G~"}qFw4).4^LZe,f:f5i%Wǒu˚qW8<2{E"7ϩM6tuw4) `>ɪ-3f1iV 7u G~j;WEsfZE&:ߺ3"!C" pD(Q;$wHD!C" pD(Q;$wHDWn3"IENDB`brms/man/figures/brms.png0000644000175000017500000002736313255206242015253 0ustar nileshnileshPNG  IHDRaHbKGD pHYsnu>tIMEjv IDATxwxTU?NM}zbE,n-6T@PiN4A+ 1&3 $|8֭#\ڈ6brssݚz2^{5VZ`x'0`6ͭyJ\\&I-Jq:_g}ɓq8+dH)"..I& 쳲322rd2w1sL=z [na4\J^'&&HAh`75F#,_>O?͕W^n7X,}vtii)Nlٲf;n=Xb͓`h`k`7PEEYYYXV?./ُ˙3gfUU>8! 
bhLqYV())qa2ؽ{7g-~>fGy?O V)%8´юBP]]3}Xd nBHNNw\*++uz6mĂ s; s+BCTTTw8N'YYYh|&O޽{:t:sꫯFQ!)))2=C͉'B( /w"¬YKܚ'Trrr;%%%a&˗/'񪫮bĉtmBbbb0 ؾ,Jzz:555nt[{l6&$$ޮv vmYVEEb&L={ho ((I&qmQ]]FOLL$88X.((UUHz'Oj*^v:>sx4h[ ?~ܭ?Z?g'YUW_}5O?4nMlllym6[NF]v1yd2Lfԩ؊.c}lA^^'Npl6… ORll,&L`̘1l UU'66֧o[JIII ngP]]o5=w̛7d!!!$$${Ч ##cl6aRSS)++)Yh&ɭ s>vMM n*}v-Zapo?w}7<RצL{tRXXH~~[u:,Z 6hBBBXx1?RʺXowz-EEE!PEzjvFes!ݻwgŊzL SN^:+**ɡcY֚5kX|ץ73{EQ$66^n';;RNرcL6kԵz=3gkh4`*BRRAAA^W1 ֛2e ~)Z%[(,,ŋ3p@@Ν"=M...&??6T/kM 'K.&`WWWEUUUkk.M֡ެ{w`L)e]zl[L0[lUUIIӑσ>Ç5z\~~~<ӌ=ڣyHNNnV;77b]z=̙37LY&wc=СCݚ'RJB.++#33֎|̘1C5|p&MDbb؂hZn1˿^<,Kktm7oft`0pq}YvhƍwPmظIm6UUc4PSZZ[of餁)S4(QjUϞ=5k}qkjJpp0 n+˪oܸgyZJ@Z!z׏lZЧ5tP.\بmABBBۃmZʢc^7|Cjj*GiXG`<%J o8gg4[ZfҤI fp8(((-L,Y&IkF!잡^KD0xBh5S53fpeՙ#Ϗ3^`8qcYVٱb V\٦7Ur>u@$y^|E=VQ]xwYlm~ӮB07d}%]:TNFV\q̙3???LEQ k`[Vrss=F˼yc&"~!=#*`msժRS2fRJt:3[JInnnʓO>U b`:*~&m(~Z)9Q3IB!!!\}z4OF#;wh46 좢" ܦ !0,XzͻVWlTH)5ݤ*GCV߿?tcUPPn]$;;mYNΜ9s`' VJvS .&y7NfCX :AZ;Ω"]w ڨ}&`}̻ϗH IcIvea%vnIM!v4U!KW#c*a)M2Cw))ˁ>SWCyMؿQs`x. LPا lw{Nh4ן/A4Bp|5.>Òc$&Q[q]Qhq`gN3Azo<)C]k׏  8PBCkpT EP&̂t(Ү5`[F)O/{z A A7^} A:q5 tWN8 턊-NB 5pR%K| 6A9$Oڔ Œ[i+` WH%(Ol(y;ɾ9YA .[AӮ̐X(y/ɖTʎJnx@aj^v0iUˍ<F ) $lreIwmU$p OG} o\8 K+TW}3D(~ !a2ɇ;)[l 8Jp+=כT~s#%$!b %OsPO89w뤎dONoR1u~}9;ԕ7\IEA ̱K>TV ^t6h`h}A槒}ңa/b%R'U {[%z8)ZSs[jU|@u.sW0pBu Uw$eQv9}U=PpA4ݪ# չP1tqUѬR;Im\*~TmSv/VG}:QvMM FlQ0G'$_ެ@3Tףa_T0sk6=Pio~?l 5NoaJBv8qGL%_:yBow_M~.*AǮ"w&J{ՁE*wB'~2.|Np2c=p@%8i` ɰ;Uu*:r6-wZqXIa}; y ë ਂQ٘^ G^t ɾ7b'YJ)jaud{Vw uM^]nޏ^DT~~M0Dpw >vM\B%cPyԤ]ؚWF"dɮL}y*ZĖOcDÆ^"ňB$t*k`wLä+~aA{H)RRQQۈvw{Ȍkuv6FpEP/.&N篓áAۻN) tPjcbn=*H+z\(i3;P-w#*wFUVUmjO U ']-Ĥ3f_}'\1=zW_!noǿ;5k>,\pIII|'H)l~[=B1IsEPw. N#559sзo_f3[lAJɺu5ڍ3bN8wbRs}sNjjjR*G_gРA\{\EbV {{{[n@ݻWj۶mrٲeҝV3fL'|Rw_ nf=z>o '/J9`em|!)/RiF?k2oر2339r eԩ7*y/Ik2|Z=qDY^^.+r(RX.rzg|Ν+_qzzlG<{hdjNL2' vՄ=KWlZ"d:|֤PԩSX,&!CXv-EEERͩk^xfZٹs'[laΝLI[*Ty&f":tHYPW{~% h\.] <<֭[p1XBanM  O] Jn>{5$kVw 8.ҹsg̛7I( +zzc|+yfzMqq17|3vyÇlnsm?]'/PR0tzSRZnQC/TNORj$|̘1kj{z#|6dh47VnҾ? =HIgco]]]_3jU=$8~K QCuSgҀD_`_E}"]c=J~yjN''OjbZ|̊bc8ei&C9ʚ|m6{uA?/n fk/. [j S'Nr[l͆jm[zPKvVg23Z‘̏e2dwuMܵ 3kɺJzLo`<[ H98:\ik6i^^^>?y_}֭c9rmLŎʺ5B|Ym95DQz9kzt ˊ"ft6W$D\]'py`wH>=+4K(QԟSoz/XS6zh<ȱcǸiF_N €P*~^;=bC1[oN@`3Nk/4Ҁ.<?[_ḭY.QN1ң͚7o:?8Csϭ!/tIDAT=))۷cXnw (sVTV=or_;u}Xblj1+-r;v ᇪ,ZO/$~VD1Y_<&MTٳg㏣=zi/FbD;22nPYYY1iutO%@1QڱHiGeԛCAO>$ׯgΝuٳiy~q23iAAA~MO>wJIgWvN12`0p7xkzVtnsn;$$L{6lX_|ArlX a,'z9F^u)**⣏>l6_Z?iƌ!]8?ҷTY,,Kmڴ+2@8PD^|sw1ǒl o0Fպ###;=~nԩ|5@O? H{3j^OVK݈ &pM7ѿ:wLTTT]&Z~~>v7wީ}Kѣ|@#c^nwcoAڙu%k~UUrrrسgPXZj6kpGq ސϖ-[XliC-lHTi"6zfs 㬠*&Rm=<8tw/)I'e"nCezއ2Q@i8?TzN84ۣo 5n$_kwSWȰcɴر^0 Z*{\:l-WP|YqE%z:?zgEEE8׶5k7ntۺLUݙ9s&wGHLLl0#વʪ*//GӞf? .dt:Ν;q?Uuu55h-@׳cΝO?=Mgsmؠ! n]Rrsslno/(Io9svkG ! !11;6ص*(( 77̞=wyG{jUdd$/ݺuhvfRRRN`d^^n1 =z[C&@z)ZEiшb9UZUUU?OFFD;F##G'h4u:ŝs6`jgeey|(o… eչˊ+>֎nc- 6܃ ֞tb!55 .23>ʢvľ}HMMeϞ=ړo j6Ecbb<'*kUSSCZZ^g<|S#F`Μ9( ZTTYmvT]x5k4yKMާK.iӦѵkW0⚵l}eggSRRl4)..geӦM)ijF%&&2an>J%[ZvTUצHKKc}@s=x49Eb4σ]J222܆EtuVOznfƏOddUy֫ ص*((cz`ɒ%[ZzhL6s=ףLBBtv222<&N !aܹSHr5xdz=:u߿+Zuow'!bʔ)?~\#5ydnY$&&֕eyZ)Sx,'..MhV5=j|vtJLLdҥtSN-nw`=Xe֬YlٲE#,e2Xp! Bӹ э%3i`ɡ­m6ٴiK,ѣM^ogL&餵 T;++~<47ϊ+ x_INNa\iEEEE˗k鱍b`zv#K'5L>RRRp???~G,X7|BCC7n~;-2Lo#j=*L&>3z)JJJ:,kTKE+Bdd$ofvʓRRVVFFFGngvE]ļy󈈈p{N||}:?#!!!;qa<ѱ6Tӄ߿]6Y3 4;JJJ=֝ͭ*v0Vo8s~~~$$$ttR v0= 2}, in which case a separate vector for each element of \code{probs} is computed and they are returned in a matrix with \code{length(probs)} rows and one column per observation. \code{loo_predictive_interval} returns a matrix with one row per observation and two columns. 
\code{loo_predictive_interval(..., prob = p)} is equivalent to \code{loo_predict(..., type = "quantile", probs = c(a, 1-a))} with \code{a = (1 - p)/2}, except it transposes the result and adds informative column names. } \description{ These functions are wrappers around the \code{\link[loo]{E_loo}} function of the \pkg{loo} package. } \examples{ \dontrun{ ## data from help("lm") ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) d <- data.frame( weight = c(ctl, trt), group = gl(2, 10, 20, labels = c("Ctl", "Trt")) ) fit <- brm(weight ~ group, data = d) loo_predictive_interval(fit, prob = 0.8) ## optionally log-weights can be pre-computed and reused psis <- loo::psis(-log_lik(fit), cores = 2) loo_predictive_interval(fit, prob = 0.8, psis_object = psis) loo_predict(fit, type = "var", psis_object = psis) } } brms/man/bayes_factor.brmsfit.Rd0000644000175000017500000000432314010776134016524 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bridgesampling.R \name{bayes_factor.brmsfit} \alias{bayes_factor.brmsfit} \alias{bayes_factor} \title{Bayes Factors from Marginal Likelihoods} \usage{ \method{bayes_factor}{brmsfit}(x1, x2, log = FALSE, ...) } \arguments{ \item{x1}{A \code{brmsfit} object} \item{x2}{Another \code{brmsfit} object based on the same responses.} \item{log}{Report Bayes factors on the log-scale?} \item{...}{Additional arguments passed to \code{\link[brms:bridge_sampler.brmsfit]{bridge_sampler}}.} } \description{ Compute Bayes factors from marginal likelihoods. } \details{ Computing the marginal likelihood requires samples of all variables defined in Stan's \code{parameters} block to be saved. Otherwise \code{bayes_factor} cannot be computed. Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, if you are planning to apply \code{bayes_factor} to your models. The computation of Bayes factors based on bridge sampling requires a lot more posterior samples than usual. A good conservative rule of thumb is perhaps 10-fold more samples (read: the default of 4000 samples may not be enough in many cases). If not enough posterior samples are provided, the bridge sampling algorithm tends to be unstable, leading to considerably different results each time it is run. We thus recommend running \code{bayes_factor} multiple times to check the stability of the results. More details are provided under \code{\link[bridgesampling:bf]{bridgesampling::bayes_factor}}. 
} \examples{ \dontrun{ # model with the treatment effect fit1 <- brm( count ~ zAge + zBase + Trt, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit1) # model without the treatment effect fit2 <- brm( count ~ zAge + zBase, data = epilepsy, family = negbinomial(), prior = prior(normal(0, 1), class = b), save_all_pars = TRUE ) summary(fit2) # compute the bayes factor bayes_factor(fit1, fit2) } } \seealso{ \code{ \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, \link[brms:post_prob.brmsfit]{post_prob} } } brms/man/is.brmsterms.Rd0000644000175000017500000000057713701270367015060 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsterms.R \name{is.brmsterms} \alias{is.brmsterms} \title{Checks if argument is a \code{brmsterms} object} \usage{ is.brmsterms(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsterms} object } \seealso{ \code{\link[brms:brmsterms]{brmsterms}} } brms/man/add_rstan_model.Rd0000644000175000017500000000150214111751667015537 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-helpers.R \name{add_rstan_model} \alias{add_rstan_model} \title{Add compiled \pkg{rstan} models to \code{brmsfit} objects} \usage{ add_rstan_model(x, overwrite = FALSE) } \arguments{ \item{x}{A \code{brmsfit} object to be updated.} \item{overwrite}{Logical. If \code{TRUE}, overwrite any existing \code{\link[rstan:stanmodel-class]{stanmodel}}. Defaults to \code{FALSE}.} } \value{ A (possibly updated) \code{brmsfit} object. } \description{ Compile a \code{\link[rstan:stanmodel-class]{stanmodel}} and add it to a \code{brmsfit} object. This enables some advanced functionality of \pkg{rstan}, most notably \code{\link[rstan:log_prob]{log_prob}} and friends, to be used with brms models fitted with other Stan backends. } brms/man/is.brmsfit.Rd0000644000175000017500000000050213701270367014474 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brmsfit-class.R \name{is.brmsfit} \alias{is.brmsfit} \title{Checks if argument is a \code{brmsfit} object} \usage{ is.brmsfit(x) } \arguments{ \item{x}{An \R object} } \description{ Checks if argument is a \code{brmsfit} object } brms/man/posterior_samples.brmsfit.Rd0000644000175000017500000000475214111751667017651 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/posterior_samples.R \name{posterior_samples.brmsfit} \alias{posterior_samples.brmsfit} \alias{posterior_samples} \title{(Deprecated) Extract Posterior Samples} \usage{ \method{posterior_samples}{brmsfit}( x, pars = NA, fixed = FALSE, add_chain = FALSE, subset = NULL, as.matrix = FALSE, as.array = FALSE, ... ) posterior_samples(x, pars = NA, ...) } \arguments{ \item{x}{An \code{R} object typically of class \code{brmsfit}} \item{pars}{Names of parameters for which posterior samples should be returned, as given by a character vector or regular expressions. By default, all posterior samples of all parameters are extracted.} \item{fixed}{Indicates whether parameter names should be matched exactly (\code{TRUE}) or treated as regular expressions (\code{FALSE}). Default is \code{FALSE}.} \item{add_chain}{A flag indicating if the returned \code{data.frame} should contain two additional columns. 
The \code{chain} column indicates the chain in which each sample was generated, the \code{iter} column indicates the iteration number within each chain.} \item{subset}{A numeric vector indicating the rows (i.e., posterior samples) to be returned. If \code{NULL} (the default), all posterior samples are returned.} \item{as.matrix}{Should the output be a \code{matrix} instead of a \code{data.frame}? Defaults to \code{FALSE}.} \item{as.array}{Should the output be an \code{array} instead of a \code{data.frame}? Defaults to \code{FALSE}.} \item{...}{Arguments passed to individual methods (if applicable).} } \value{ A data.frame (matrix or array) containing the posterior samples. } \description{ Extract posterior samples of specified parameters. The \code{posterior_samples} method is deprecated. We recommend using the more modern and consistent \code{\link[brms:draws-brms]{as_draws_*}} extractor functions of the \pkg{posterior} package instead. } \examples{ \dontrun{ fit <- brm(rating ~ treat + period + carry + (1|subject), data = inhaler, family = "cumulative") # extract posterior samples of population-level effects samples1 <- posterior_samples(fit, pars = "^b") head(samples1) # extract posterior samples of group-level standard deviations samples2 <- posterior_samples(fit, pars = "^sd_") head(samples2) } } \seealso{ \code{\link[brms:draws-brms]{as_draws}}, \code{\link[brms:as.data.frame.brmsfit]{as.data.frame}} } brms/man/stanvar.Rd0000644000175000017500000000636314105230573014077 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stanvars.R \name{stanvar} \alias{stanvar} \alias{stanvars} \title{User-defined variables passed to Stan} \usage{ stanvar( x = NULL, name = NULL, scode = NULL, block = "data", position = "start", pll_args = NULL ) } \arguments{ \item{x}{An \R object containing data to be passed to Stan. Only required if \code{block = 'data'} and ignored otherwise.} \item{name}{Optional character string providing the desired variable name of the object in \code{x}. If \code{NULL} (the default) the variable name is directly inferred from \code{x}.} \item{scode}{Line of Stan code to define the variable in Stan language. If \code{block = 'data'}, the Stan code is inferred based on the class of \code{x} by default.} \item{block}{Name of one of Stan's program blocks in which the variable should be defined. Can be \code{'data'}, \code{'tdata'} (transformed data), \code{'parameters'}, \code{'tparameters'} (transformed parameters), \code{'model'}, \code{'likelihood'} (part of the model block where the likelihood is given), \code{'genquant'} (generated quantities) or \code{'functions'}.} \item{position}{Name of the position within the block where the Stan code should be placed. Currently allowed are \code{'start'} (the default) and \code{'end'} of the block.} \item{pll_args}{Optional Stan code to be put into the header of \code{partial_log_lik} functions. This ensures that the variables specified in \code{scode} can be used in the likelihood even when within-chain parallelization is activated via \code{\link{threading}}.} } \value{ An object of class \code{stanvars}. } \description{ Prepare user-defined variables to be passed to one of Stan's program blocks. This is primarily useful for defining more complex priors, for refitting models without recompilation despite changing priors, or for defining custom Stan functions. 
} \examples{ bprior <- prior(normal(mean_intercept, 10), class = "Intercept") stanvars <- stanvar(5, name = "mean_intercept") make_stancode(count ~ Trt, epilepsy, prior = bprior, stanvars = stanvars) # define a multi-normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") + stanvar(diag(2), "V", scode = " matrix[K, K] V;") make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) # define a hierachical prior on the regression coefficients bprior <- set_prior("normal(0, tau)", class = "b") + set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) stanvars <- stanvar(scode = "real tau;", block = "parameters") make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) # ensure that 'tau' is passed to the likelihood of a threaded model # not necessary for this example but may be necessary in other cases stanvars <- stanvar(scode = "real tau;", block = "parameters", pll_args = "real tau") make_stancode(count ~ Trt + zBase, epilepsy, stanvars = stanvars, threads = threading(2)) } brms/man/cor_fixed.Rd0000644000175000017500000000155413701270367014366 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autocor.R \name{cor_fixed} \alias{cor_fixed} \alias{cov_fixed} \title{(Deprecated) Fixed user-defined covariance matrices} \usage{ cor_fixed(V) } \arguments{ \item{V}{Known covariance matrix of the response variable. If a vector is passed, it will be used as diagonal entries (variances) and covariances will be set to zero.} } \value{ An object of class \code{cor_fixed}. } \description{ This function is deprecated. Please see \code{\link{fcor}} for the new syntax. Define a fixed covariance matrix of the response variable for instance to model multivariate effect sizes in meta-analysis. } \examples{ \dontrun{ dat <- data.frame(y = rnorm(3)) V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) fit <- brm(y~1, data = dat, autocor = cor_fixed(V)) } } brms/vignettes/0000755000175000017500000000000014146747050013367 5ustar nileshnileshbrms/vignettes/brms_overview.ltx0000644000175000017500000017607013701270370017014 0ustar nileshnilesh\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Overview of the brms Package} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{\pkg{brms}: An \proglang{R} Package for Bayesian Multilevel Models using \pkg{Stan}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{brms: An R Package for Bayesian Multilevel Models using Stan} %% without formatting \Shorttitle{\pkg{brms}: Bayesian Multilevel Models using Stan} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package implements Bayesian multilevel models in \proglang{R} using the probabilistic programming language \pkg{Stan}. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, binomial, Poisson, survival, response times, ordinal, quantile, zero-inflated, hurdle, and even non-linear models all in a multilevel context. 
Further modeling options include autocorrelation of the response variable, user defined covariance structures, censored data, as well as meta-analytic standard errors. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared using posterior-predictive checks and leave-one-out cross-validation. If you use \pkg{brms}, please cite this article as published in the Journal of Statistical Software \citep{brms1}. } \Keywords{Bayesian inference, multilevel model, ordinal data, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel model, ordinal data, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer a great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow the modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for \proglang{R} \citep{Rcore2015} have been developed to fit MLMs. Possibly the most widely known package in this area is \pkg{lme4} \citep{bates2015}, which uses maximum likelihood or restricted maximum likelihood methods for model fitting. Although alternative Bayesian methods have several advantages over frequentist approaches (e.g., the possibility of explicitly incorporating prior knowledge about parameters into the model), their practical use was limited for a long time because the posterior distributions of more complex models (such as MLMs) could not be found analytically. Markov chain Monte Carlo (MCMC) algorithms allowing to draw random samples from the posterior were not available or too time-consuming. In the last few decades, however, this has changed with the development of new algorithms and the rapid increase of general computing power. Today, several software packages implement these techniques, for instance \pkg{WinBugs} \citep{lunn2000, spiegelhalter2003}, \pkg{OpenBugs} \citep{spiegelhalter2007}, \pkg{JAGS} \citep{plummer2013}, \pkg{MCMCglmm} \citep{hadfield2010} and \pkg{Stan} \citep{stan2017, carpenter2017} to mention only a few. 
With the exception of the latter, all of these programs are primarily using combinations of Metropolis-Hastings updates \citep{metropolis1953,hastings1970} and Gibbs-sampling \citep{geman1984,gelfand1990}, sometimes also coupled with slice-sampling \citep{damien1999,neal2003}. One of the main problems of these algorithms is their rather slow convergence for high-dimensional models with correlated parameters \citep{neal2011,hoffman2014,gelman2014}. Furthermore, Gibbs-sampling requires priors to be conjugate to the likelihood of parameters in order to work efficiently \citep{gelman2014}, thus reducing the freedom of the researcher in choosing a prior that reflects his or her beliefs. In contrast, \pkg{Stan} implements Hamiltonian Monte Carlo \citep{duane1987, neal2011} and its extension, the No-U-Turn Sampler (NUTS) \citep{hoffman2014}. These algorithms converge much more quickly especially for high-dimensional models regardless of whether the priors are conjugate or not \citep{hoffman2014}. Similar to software packages like \pkg{WinBugs}, \pkg{Stan} comes with its own programming language, allowing for great modeling flexibility (cf., \citeauthor{stanM2017} \citeyear{stanM2017}; \citeauthor{carpenter2017} \citeyear{carpenter2017}). Many researchers may still hesitate to use \pkg{Stan} directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error prone process even for researchers familiar with Bayesian inference. The package \pkg{brms}, presented in this paper, aims at closing this gap (at least for MLMs) allowing the user to benefit from the merits of \pkg{Stan} only by using simple, \pkg{lme4}-like formula syntax. \pkg{brms} supports a wide range of distributions and link functions, allows for multiple grouping factors each with multiple group-level effects, autocorrelation of the response variable, user defined covariance structures, as well as flexible and explicit prior specifications. The purpose of the present article is to provide a general overview of the \pkg{brms} package (version 0.10.0). We begin by explaining the underlying structure of MLMs. Next, the software is introduced in detail using recurrence times of infection in kidney patients \citep{mcgilchrist1991} and ratings of inhaler instructions \citep{ezzet1991} as examples. We end by comparing \pkg{brms} to other \proglang{R} packages implementing MLMs and describe future plans for extending the package. \section{Model description} \label{model} The core of every MLM is the prediction of the response $y$ through the linear combination $\eta$ of predictors transformed by the inverse link function $f$ assuming a certain distribution $D$ for $y$. We write $$y_i \sim D(f(\eta_i), \theta)$$ to stress the dependency on the $i\textsuperscript{th}$ data point. In many \proglang{R} packages, $D$ is also called the `family' and we will use this term in the following. The parameter $\theta$ describes additional family specific parameters that typically do not vary across data points, such as the standard deviation $\sigma$ in normal models or the shape $\alpha$ in Gamma or negative binomial models. The linear predictor can generally be written as $$\eta = \mathbf{X} \beta + \mathbf{Z} u$$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. 
The response $y$ as well as $\mathbf{X}$ and $\mathbf{Z}$ make up the data, whereas $\beta$, $u$, and $\theta$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects. However, we avoid these terms in the present paper following the recommendations of \cite{gelmanMLM2006}, as they are not used unambiguously in the literature. Also, we want to make explicit that $u$ is a model parameter in the same manner as $\beta$ so that uncertainty in its estimates can be naturally evaluated. In fact, this is an important advantage of Bayesian MCMC methods as compared to maximum likelihood approaches, which do not treat $u$ as a parameter, but assume that it is part of the error term instead (cf., \citeauthor{fox2011}, \citeyear{fox2011}). Except for linear models, we do not incorporate an additional error term for every observation by default. If desired, such an error term can always be modeled using a grouping factor with as many levels as observations in the data. \subsection{Prior distributions} \subsubsection{Regression parameters at population-level} In \pkg{brms}, population-level parameters are not restricted to have normal priors. Instead, every parameter can have every one-dimensional prior implemented in \pkg{Stan}, for instance uniform, Cauchy or even Gamma priors. As a negative side effect of this flexibility, correlations between them cannot be modeled as parameters. If desired, point estimates of the correlations can be obtained after sampling has been done. By default, population level parameters have an improper flat prior over the reals. \subsubsection{Regression parameters at group-level} The group-level parameters $u$ are assumed to come from a multivariate normal distribution with mean zero and unknown covariance matrix $\mathbf{\Sigma}$: $$u \sim N(0, \mathbf{\Sigma})$$ As is generally the case, covariances between group-level parameters of different grouping factors are assumed to be zero. This implies that $\mathbf{Z}$ and $u$ can be split up into several matrices $\mathbf{Z_k}$ and parameter vectors $u_k$, where $k$ indexes grouping factors, so that the model can be simplified to $$u_k \sim N(0, \mathbf{\Sigma_k})$$ Usually, but not always, we can also assume group-level parameters associated with different levels (indexed by $j$) of the same grouping factor to be independent leading to $$u_{kj} \sim N(0, \mathbf{V_k})$$ The covariance matrices $\mathbf{V_k}$ are modeled as parameters. In most packages, an Inverse-Wishart distribution is used as a prior for $\mathbf{V_k}$. This is mostly because its conjugacy leads to good properties of Gibbs-Samplers \citep{gelman2014}. However, there are good arguments against the Inverse-Wishart prior \citep{natarajan2000, kass2006}. The NUTS-Sampler implemented in \pkg{Stan} does not require priors to be conjugate. This advantage is utilized in \pkg{brms}: $\mathbf{V_k}$ is parameterized in terms of a correlation matrix $\mathbf{\Omega_k}$ and a vector of standard deviations $\sigma_k$ through $$\mathbf{V_k} = \mathbf{D}(\sigma_k) \mathbf{\Omega_k} \mathbf{D}(\sigma_k)$$ where $\mathbf{D}(\sigma_k)$ denotes the diagonal matrix with diagonal elements $\sigma_k$. Priors are then specified for the parameters on the right hand side of the equation. 
For $\mathbf{\Omega_k}$, we use the LKJ-Correlation prior with parameter $\zeta > 0$ by \cite{lewandowski2009}\footnote{Internally, the Cholesky factor of the correlation matrix is used, as it is more efficient and numerically stable.}: $$\mathbf{\Omega_k} \sim \mathrm{LKJ}(\zeta)$$ The expected value of the LKJ-prior is the identity matrix (implying correlations of zero) for any positive value of $\zeta$, which can be interpreted like the shape parameter of a symmetric beta distribution \citep{stanM2017}. If $\zeta = 1$ (the default in \pkg{brms}) the density is uniform over correlation matrices of the respective dimension. If $\zeta > 1$, the identity matrix is the mode of the prior, with a sharper peak in the density for larger values of $\zeta$. If $0 < \zeta < 1$ the prior is U-shaped having a trough at the identity matrix, which leads to higher probabilities for non-zero correlations. For every element of $\sigma_k$, any prior can be applied that is defined on the non-negative reals only. As default in \pkg{brms}, we use a half Student-t prior with 3 degrees of freedom. This prior often leads to better convergence of the models than a half Cauchy prior, while still being relatively weakly informative. Sometimes -- for instance when modeling pedigrees -- different levels of the same grouping factor cannot be assumed to be independent. In this case, the covariance matrix of $u_k$ becomes $$\mathbf{\Sigma_k} = \mathbf{V_k} \otimes \mathbf{A_k}$$ where $\mathbf{A_k}$ is the known covariance matrix between levels and $\otimes$ is the Kronecker product. \subsubsection{Family specific parameters} For some families, additional parameters need to be estimated. In the current section, we only name the most important ones. Normal and Student's distributions need the parameter $\sigma$ to account for residual error variance. By default, $\sigma$ has a half Cauchy prior with a scale parameter that depends on the standard deviation of the response variable to remain only weakly informative regardless of response variable's scaling. Furthermore, Student's distributions needs the parameter $\nu$ representing the degrees of freedom. By default, $\nu$ has a wide gamma prior as proposed by \cite{juarez2010}. Gamma, Weibull, and negative binomial distributions need the shape parameter $\alpha$ that also has a wide gamma prior by default. \section{Parameter estimation} The \pkg{brms} package does not fit models itself but uses \pkg{Stan} on the back-end. Accordingly, all samplers implemented in \pkg{Stan} can be used to fit \pkg{brms} models. Currently, these are the static Hamiltonian Monte-Carlo (HMC) Sampler sometimes also referred to as Hybrid Monte-Carlo \citep{neal2011, neal2003, duane1987} and its extension the No-U-Turn Sampler (NUTS) by \cite{hoffman2014}. HMC-like algorithms produce samples that are much less autocorrelated than those of other samplers such as the random-walk Metropolis algorithm \citep{hoffman2014, Creutz1988}. The main drawback of this increased efficiency is the need to calculate the gradient of the log-posterior, which can be automated using algorithmic differentiation \citep{griewank2008} but is still a time-consuming process for more complex models. Thus, using HMC leads to higher quality samples but takes more time per sample than other algorithms typically applied. Another drawback of HMC is the need to pre-specify at least two parameters, which are both critical for the performance of HMC. 
The NUTS Sampler allows setting these parameters automatically thus eliminating the need for any hand-tuning, while still being at least as efficient as a well tuned HMC \citep{hoffman2014}. For more details on the sampling algorithms applied in \pkg{Stan}, see the \pkg{Stan} user's manual \citep{stanM2017} as well as \cite{hoffman2014}. In addition to the estimation of model parameters, \pkg{brms} allows drawing samples from the posterior predictive distribution as well as from the pointwise log-likelihood. Both can be used to assess model fit. The former allows a comparison between the actual response $y$ and the response $\hat{y}$ predicted by the model. The pointwise log-likelihood can be used, among others, to calculate the widely applicable information criterion (WAIC) proposed by \cite{watanabe2010} and leave-one-out cross-validation (LOO; \citealp{gelfand1992}; \citealp{vehtari2015}; see also \citealp{ionides2008}) both allowing to compare different models applied to the same data (lower WAICs and LOOs indicate better model fit). The WAIC can be viewed as an improvement of the popular deviance information criterion (DIC), which has been criticized by several authors (\citealp{vehtari2015}; \citealp{plummer2008}; \citealp{vanderlinde2005}; see also the discussion at the end of the original DIC paper by \citealp{spiegelhalter2002}) in part because of problems arising from fact that the DIC is only a point estimate. In \pkg{brms}, WAIC and LOO are implemented using the \pkg{loo} package \citep{loo2016} also following the recommendations of \cite{vehtari2015}. \section{Software} \label{software} The \pkg{brms} package provides functions for fitting MLMs using \pkg{Stan} for full Bayesian inference. To install the latest release version of \pkg{brms} from CRAN, type \code{install.packages("brms")} within \proglang{R}. The current developmental version can be downloaded from GitHub via \begin{Sinput} devtools::install_github("paul-buerkner/brms") \end{Sinput} Additionally, a \proglang{C++} compiler is required. This is because \pkg{brms} internally creates \pkg{Stan} code, which is translated to \proglang{C++} and compiled afterwards. The program \pkg{Rtools} \citep{Rtools2015} comes with a \proglang{C++} compiler for Windows\footnote{During the installation process, there is an option to change the system \code{PATH}. Please make sure to check this options, because otherwise \pkg{Rtools} will not be available within \proglang{R}.}. On OS X, one should use \pkg{Xcode} \citep{Xcode2015} from the App Store. To check whether the compiler can be called within \proglang{R}, run \code{system("g++ -v")} when using \pkg{Rtools} or \code{system("clang++ -v")} when using \pkg{Xcode}. If no warning occurs and a few lines of difficult to read system code are printed out, the compiler should work correctly. For more detailed instructions on how to get the compilers running, see the prerequisites section on \url{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}. Models are fitted in \pkg{brms} using the following procedure, which is also summarized in Figure~\ref{flowchart}. First, the user specifies the model using the \code{brm} function in a way typical for most model fitting \proglang{R} functions, that is by defining \code{formula}, \code{data}, and \code{family}, as well as some other optional arguments. Second, this information is processed and the \code{make_stancode} and \code{make_standata} functions are called. 
The former generates the model code in \pkg{Stan} language and the latter prepares the data for use in \pkg{Stan}. These two are the mandatory parts of every \pkg{Stan} model and without \pkg{brms}, users would have to specify them themselves. Third, \pkg{Stan} code and data as well as additional arguments (such as the number of iterations and chains) are passed to functions of the \pkg{rstan} package (the \proglang{R} interface of \pkg{Stan}; \citeauthor{stan2017}, \citeyear{stan2017}). Fourth, the model is fitted by \pkg{Stan} after translating and compiling it in \proglang{C++}. Fifth, after the model has been fitted and returned by \pkg{rstan}, the fitted model object is post-processed in \pkg{brms} among others by renaming the model parameters to be understood by the user. Sixth, the results can be investigated in \proglang{R} using various methods such as \code{summary}, \code{plot}, or \code{predict} (for a complete list of methods type \code{methods(class = "brmsfit")}). \begin{figure}[ht] \centering \includegraphics[height = 0.4\textheight, keepaspectratio]{flowchart.pdf} \caption{High level description of the model fitting procedure used in \pkg{brms}.} \label{flowchart} \end{figure} \subsection{A worked example} In the following, we use an example about the recurrence time of an infection in kidney patients initially published by \cite{mcgilchrist1991}. The data set consists of 76 entries of 7 variables: \begin{Sinput} R> library("brms") R> data("kidney") R> head(kidney, n = 3) \end{Sinput} \begin{Soutput} time censored patient recur age sex disease 1 8 0 1 1 28 male other 2 23 0 2 1 48 female GN 3 22 0 3 1 32 male other \end{Soutput} Variable \code{time} represents the recurrence time of the infection, \code{censored} indicates if \code{time} is right censored (\code{1}) or not censored (\code{0}), variable \code{patient} is the patient id, and \code{recur} indicates if it is the first or second recurrence in that patient. Finally, variables \code{age}, \code{sex}, and \code{disease} make up the predictors. \subsection[Fitting models with brms]{Fitting models with \pkg{brms}} The core of the \pkg{brms} package is the \code{brm} function and we will explain its argument structure using the example above. Suppose we want to predict the (possibly censored) recurrence time using a log-normal model, in which the intercept as well as the effect of \code{age} is nested within patients. Then, we may use the following code: \begin{Sinput} fit1 <- brm(formula = time | cens(censored) ~ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = c(set_prior("normal(0,5)", class = "b"), set_prior("cauchy(0,2)", class = "sd"), set_prior("lkj(2)", class = "cor")), warmup = 1000, iter = 2000, chains = 4, control = list(adapt_delta = 0.95)) \end{Sinput} \subsection[formula: Information on the response and predictors]{\code{formula}: Information on the response and predictors} Without doubt, \code{formula} is the most complicated argument, as it contains information on the response variable as well as on predictors at different levels of the model. Everything before the $\sim$ sign relates to the response part of \code{formula}. In the usual and most simple case, this is just one variable name (e.g., \code{time}). However, to incorporate additional information about the response, one can add one or more terms of the form \code{| fun(variable)}. 
\code{fun} may be one of a few functions defined internally in \pkg{brms} and \code{variable} corresponds to a variable in the data set supplied by the user. In this example, \code{cens} makes up the internal function that handles censored data, and \code{censored} is the variable that contains information on the censoring. Other available functions in this context are \code{weights} and \code{disp} to allow different sorts of weighting, \code{se} to specify known standard errors primarily for meta-analysis, \code{trunc} to define truncation boundaries, \code{trials} for binomial models\footnote{In functions such as \code{glm} or \code{glmer}, the binomial response is typically passed as \code{cbind(success, failure)}. In \pkg{brms}, the equivalent syntax is \code{success | trials(success + failure)}.}, and \code{cat} to specify the number of categories for ordinal models. Everything on the right side of $\sim$ specifies predictors. Here, the syntax exactly matches that of \pkg{lme4}. For both, population-level and group-level terms, the \code{+} is used to separate different effects from each other. Group-level terms are of the form \code{(coefs | group)}, where \code{coefs} contains one or more variables whose effects are assumed to vary with the levels of the grouping factor given in \code{group}. Multiple grouping factors each with multiple group-level coefficients are possible. In the present example, only one group-level term is specified in which \code{1 + age} are the coefficients varying with the grouping factor \code{patient}. This implies that the intercept of the model as well as the effect of age is supposed to vary between patients. By default, group-level coefficients within a grouping factor are assumed to be correlated. Correlations can be set to zero by using the \code{(coefs || group)} syntax\footnote{In contrast to \pkg{lme4}, the \code{||} operator in \pkg{brms} splits up the design matrix computed from \code{coefs} instead of decomposing \code{coefs} in its terms. This implies that columns of the design matrix originating from the same factor are also assumed to be uncorrelated, whereas \pkg{lme4} estimates the correlations in this case. For a way to achieve \pkg{brms}-like behavior with \pkg{lme4}, see the \code{mixed} function of the \pkg{afex} package by \cite{afex2015}.}. Everything on the right side of \code{formula} that is not recognized as part of a group-level term is treated as a population-level effect. In this example, the population-level effects are \code{age}, \code{sex}, and \code{disease}. \subsection[family: Distribution of the response variable]{\code{family}: Distribution of the response variable} Argument \code{family} should usually be a family function, a call to a family function or a character string naming the family. If not otherwise specified, default link functions are applied. \pkg{brms} comes with a large variety of families. Linear and robust linear regression can be performed using the \code{gaussian} or \code{student} family combined with the \code{identity} link. For dichotomous and categorical data, families \code{bernoulli}, \code{binomial}, and \code{categorical} combined with the \code{logit} link, by default, are perfectly suited. Families \code{poisson}, \code{negbinomial}, and \code{geometric} allow for modeling count data. Families \code{lognormal}, \code{Gamma}, \code{exponential}, and \code{weibull} can be used (among others) for survival regression. 
Ordinal regression can be performed using the families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}. Finally, families \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, and \code{hurdle_gamma} can be used to adequately model excess zeros in the response. In our example, we use \code{family = lognormal()} implying a log-normal ``survival'' model for the response variable \code{time}.

\subsection[prior: Prior distributions of model parameters]{\code{prior}: Prior distributions of model parameters}

Every population-level effect has its corresponding regression parameter. These parameters are named as \code{b\_<name>}, where \code{<name>} represents the name of the corresponding population-level effect. The default prior is an improper flat prior over the reals. Suppose, for instance, that we want to set a normal prior with mean \code{0} and standard deviation \code{10} on the effect of \code{age} and a Cauchy prior with location \code{1} and scale \code{2} on \code{sexfemale}\footnote{When factors are used as predictors, parameter names will depend on the factor levels. To get an overview of all parameters and parameter classes for which priors can be specified, use function \code{get\_prior}. For the present example, \code{get\_prior(time | cens(censored) $\sim$ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal())} yields the desired overview.}. Then, we may write
\begin{Sinput}
prior <- c(set_prior("normal(0,10)", class = "b", coef = "age"),
           set_prior("cauchy(1,2)", class = "b", coef = "sexfemale"))
\end{Sinput}
To put the same prior (e.g., a normal prior) on all population-level effects at once, we may write as a shortcut \code{set_prior("normal(0,10)", class = "b")}. This also leads to faster sampling, because priors can be vectorized in this case. Note that we could also omit the \code{class} argument for population-level effects, as it is the default class in \code{set_prior}. A special shrinkage prior to be applied on population-level effects is the horseshoe prior \citep{carvalho2009, carvalho2010}. It is symmetric around zero with fat tails and an infinitely large spike at zero. This makes it ideal for sparse models that have many regression coefficients, although only a minority of them is non-zero. The horseshoe prior can be applied on all population-level effects at once (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. The $1$ implies that the Student-$t$ prior of the local shrinkage parameters has 1 degree of freedom. In \pkg{brms} it is possible to increase the degrees of freedom (which will often improve convergence), although the prior no longer resembles a horseshoe in this case\footnote{This class of priors is often referred to as hierarchical shrinkage family, which contains the original horseshoe prior as a special case.}. For more details see \cite{carvalho2009, carvalho2010}. Each group-level effect of each grouping factor has a standard deviation parameter, which is restricted to be non-negative and, by default, has a half Student-$t$ prior with $3$ degrees of freedom and a scale parameter that is minimally $10$. For non-ordinal models, \pkg{brms} tries to evaluate if the scale is large enough to be considered only weakly informative for the model at hand by comparing it with the standard deviation of the response after applying the link function.
If this is not the case, it will increase the scale based on the aforementioned standard deviation\footnote{Changing priors based on the data is not truly Bayesian and might rightly be criticized. However, it helps avoid the problem of too informative default priors without always forcing users to define their own priors. The latter would also be problematic as not all users can be expected to be well-educated Bayesians and reasonable default priors will help them a lot in using Bayesian methods.}. \pkg{Stan} implicitly defines a half Student-$t$ prior by using a Student-$t$ prior on a restricted parameter \citep{stanM2017}. For other reasonable priors on standard deviations see \cite{gelman2006}. In \pkg{brms}, standard deviation parameters are named as \code{sd\_<group>\_<coef>} so that \code{sd\_patient\_Intercept} and \code{sd\_patient\_age} are the parameter names in the example. If desired, it is possible to set a different prior on each parameter, but statements such as \code{set_prior("student_t(3,0,5)", class = "sd", group = "patient")} or even \code{set_prior("student_t(3,0,5)", class = "sd")} may also be used and are again faster because of vectorization. If there is more than one group-level effect per grouping factor, correlations between group-level effects are estimated. As mentioned in Section~\ref{model}, the LKJ-Correlation prior with parameter $\zeta > 0$ \citep{lewandowski2009} is used for this purpose. In \pkg{brms}, this prior is abbreviated as \code{"lkj(zeta)"} and correlation matrix parameters are named as \code{cor\_<group>} (e.g., \code{cor_patient}), so that \code{set_prior("lkj(2)", class = "cor", group = "patient")} is a valid statement. To set the same prior on every correlation matrix in the model, \code{set_prior("lkj(2)", class = "cor")} is also allowed, but does not come with any efficiency increases. Other model parameters such as the residual standard deviation \code{sigma} in normal models or the \code{shape} in Gamma models have their priors defined in the same way, where each of them is treated as having its own parameter class. A complete overview of possible prior distributions is given in the \pkg{Stan} user's manual \citep{stanM2017}. Note that \pkg{brms} does not thoroughly check if the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is parsed to \proglang{C++} and return an error if they are not. This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model.

\subsection[control: Adjusting the sampling behavior of Stan]{\code{control}: Adjusting the sampling behavior of \pkg{Stan}}

In addition to choosing the number of iterations, warmup samples, and chains, users can control the behavior of the NUTS sampler by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or eliminate at best) the number of divergent transitions that cause a bias in the obtained posterior samples. Whenever you see the warning \code{"There were x divergent transitions after warmup."}, you should really think about increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (current default) and \code{1}.
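
For instance, assuming the kidney model \code{fit1} from above has already been fitted, it could be refitted with a larger \code{adapt_delta} via the \code{update} method (shown here only as a minimal sketch; when only sampling arguments change, \code{update} typically does not need to recompile the model):
\begin{Sinput}
R> fit1b <- update(fit1, control = list(adapt_delta = 0.99))
\end{Sinput}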
Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior samples. Another problem arises when the maximum depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior samples. When it happens, \pkg{Stan} will throw a warning suggesting to increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = <x>)} with a positive integer \code{<x>} that should usually be larger than the current default of \code{10}.

\subsection{Analyzing the results}

The example model \code{fit1} is fitted using 4 chains, each with 2000 iterations of which the first 1000 are warmup to calibrate the sampler, leading to a total of 4000 posterior samples\footnote{To save time, chains may also run in parallel when using argument \code{cluster}.}. For researchers familiar with Gibbs or Metropolis-Hastings sampling, this number may seem far too small to achieve good convergence and reasonable results, especially for multilevel models. However, as \pkg{brms} utilizes the NUTS sampler \citep{hoffman2014} implemented in \pkg{Stan}, even complex models can often be fitted with not more than a few thousand samples. Of course, every iteration is more computationally intensive and time-consuming than the iterations of other algorithms, but the quality of the samples (i.e., the effective sample size per iteration) is usually higher. After the posterior samples have been computed, the \code{brm} function returns an \proglang{R} object, containing (among other things) the fully commented model code in \pkg{Stan} language, the data to fit the model, and the posterior samples themselves. The model code and data for the present example can be extracted through \code{stancode(fit1)} and \code{standata(fit1)} respectively\footnote{Both model code and data may be amended and used to fit new models. That way, \pkg{brms} can also serve as a good starting point in building more complicated models in \pkg{Stan}, directly.}. A model summary is readily available using
\begin{Sinput}
R> summary(fit1, waic = TRUE)
\end{Sinput}
\begin{Soutput}
 Family: lognormal (identity)
Formula: time | cens(censored) ~ age * sex + disease + (1 + age | patient)
   Data: kidney (Number of observations: 76)
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000
   WAIC: 673.51

Group-Level Effects:
~patient (Number of levels: 38)
                   Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
sd(Intercept)          0.40      0.28     0.01     1.01       1731    1
sd(age)                0.01      0.01     0.00     0.02       1137    1
cor(Intercept,age)    -0.13      0.46    -0.88     0.76       3159    1

Population-Level Effects:
              Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
Intercept         2.73      0.96     0.82     4.68       2139    1
age               0.01      0.02    -0.03     0.06       1614    1
sexfemale         2.42      1.13     0.15     4.64       2065    1
diseaseGN        -0.40      0.53    -1.45     0.64       2664    1
diseaseAN        -0.52      0.50    -1.48     0.48       2713    1
diseasePKD        0.60      0.74    -0.86     2.02       2968    1
age:sexfemale    -0.02      0.03    -0.07     0.03       1956    1

Family Specific Parameters:
      Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
sigma     1.15      0.13     0.91     1.44       4000    1

Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample
is a crude measure of effective sample size, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
\end{Soutput} On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains, as well as the WAIC. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and correlations between group-level effects. On the bottom of the output, population-level effects are displayed. If incorporated, autocorrelation and family specific parameters (e.g., the residual standard deviation \code{sigma}) are also given. In general, every parameter is summarized using the mean (\code{Estimate}) and the standard deviation (\code{Est.Error}) of the posterior distribution as well as two-sided 95\% Credible intervals (\code{l-95\% CI} and \code{u-95\% CI}) based on quantiles. The \code{Eff.Sample} value is an estimation of the effective sample size; that is the number of independent samples from the posterior distribution that would be expected to yield the same standard error of the posterior mean as is obtained from the dependent samples returned by the MCMC algorithm. The \code{Rhat} value provides information on the convergence of the algorithm (cf., \citeauthor{gelman1992}, \citeyear{gelman1992}). If \code{Rhat} is considerably greater than 1 (i.e., $> 1.1$), the chains have not yet converged and it is necessary to run more iterations and/or set stronger priors. To visually investigate the chains as well as the posterior distribution, the \code{plot} method can be used (see Figure~\ref{kidney_plot}). An even more detailed investigation can be achieved by applying the \pkg{shinystan} package \citep{gabry2015} through method \code{launch_shiny}. With respect to the above summary, \code{sexfemale} seems to be the only population-level effect with considerable influence on the response. Because the mean of \code{sexfemale} is positive, the model predicts longer periods without an infection for females than for males. Effects of population-level predictors can also be visualized with the \code{conditional_effects} method (see Figure~\ref{kidney_conditional_effects}). \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{kidney_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the kidney model discussed in Section~\ref{software}.} \label{kidney_plot} \end{figure} \begin{figure}[ht] \centering \includegraphics[height=0.90\textheight]{kidney_conditional_effects.pdf} \caption{Conditional effects plots of all population-level predictors of the kidney model discussed in Section~\ref{software}.} \label{kidney_conditional_effects} \end{figure} Looking at the group-level effects, the standard deviation parameter of \code{age} is suspiciously small. To test whether it is smaller than the standard deviation parameter of \code{Intercept}, we apply the \code{hypothesis} method: \begin{Sinput} R> hypothesis(fit1, "Intercept - age > 0", class = "sd", group = "patient") \end{Sinput} \begin{Soutput} Hypothesis Tests for class sd_patient: Estimate Est.Error l-95% CI u-95% CI Evid.Ratio Intercept-age > 0 0.39 0.27 0.03 Inf 67.97 * --- '*': The expected value under the hypothesis lies outside the 95% CI. \end{Soutput} The one-sided 95\% credibility interval does not contain zero, thus indicating that the standard deviations differ from each other in the expected direction. 
In accordance with this finding, the \code{Evid.Ratio} shows that the hypothesis being tested (i.e., \code{Intercept - age > 0}) is about $68$ times more likely than the alternative hypothesis \code{Intercept - age < 0}. It is important to note that this kind of comparison is not easily possible when applying frequentist methods, because in this case only point estimates are available for group-level standard deviations and correlations. When looking at the correlation between both group-level effects, its distribution displayed in Figure~\ref{kidney_plot} and the 95\% credibility interval in the summary output appear to be rather wide. This indicates that there is not enough evidence in the data to reasonably estimate the correlation. Together, the small standard deviation of \code{age} and the uncertainty in the correlation raise the question if \code{age} should be modeled as a group specific term at all. To answer this question, we fit another model without this term: \begin{Sinput} R> fit2 <- update(fit1, formula. = ~ . - (1 + age|patient) + (1|patient)) \end{Sinput} A good way to compare both models is leave-one-out cross-validation (LOO)\footnote{The WAIC is an approximation of LOO that is faster and easier to compute. However, according to \cite{vehtari2015}, LOO may be the preferred method to perform model comparisons.}, which can be called in \pkg{brms} using \begin{Sinput} R> LOO(fit1, fit2) \end{Sinput} \begin{Soutput} LOOIC SE fit1 675.45 45.18 fit2 674.17 45.06 fit1 - fit2 1.28 0.99 \end{Soutput} In the output, the LOO information criterion for each model as well as the difference of the LOOs each with its corresponding standard error is shown. Both LOO and WAIC are approximately normal if the number of observations is large so that the standard errors can be very helpful in evaluating differences in the information criteria. However, for small sample sizes, standard errors should be interpreted with care \citep{vehtari2015}. For the present example, it is immediately evident that both models have very similar fit, indicating that there is little benefit in adding group specific coefficients for \code{age}. \subsection{Modeling ordinal data} In the following, we want to briefly discuss a second example to demonstrate the capabilities of \pkg{brms} in handling ordinal data. \cite{ezzet1991} analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients. Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a four-point ordinal scale. Ratings are predicted by \code{treat} to indicate which of the two inhaler devices was used, \code{period} to indicate the time of administration, and \code{carry} to model possible carry over effects. \begin{Sinput} R> data("inhaler") R> head(inhaler, n = 1) \end{Sinput} \begin{Soutput} subject rating treat period carry 1 1 1 0.5 0.5 0 \end{Soutput} Typically, the ordinal response is assumed to originate from the categorization of a latent continuous variable. That is there are $K$ latent thresholds (model intercepts), which partition the continuous scale into the $K + 1$ observable, ordered categories. Following this approach leads to the cumulative or graded-response model \citep{samejima1969} for ordinal data implemented in many \proglang{R} packages. In \pkg{brms}, it is available via family \code{cumulative}. 
Fitting the cumulative model to the inhaler data, also incorporating an intercept varying by subjects, may look like this:
\begin{Sinput}
fit3 <- brm(formula = rating ~ treat + period + carry + (1|subject),
            data = inhaler, family = cumulative)
\end{Sinput}
While the support for ordinal data in most \proglang{R} packages ends here\footnote{Exceptions known to us are the packages \pkg{ordinal} \citep{christensen2015} and \pkg{VGAM} \citep{yee2010}. The former supports only cumulative models but with different modeling options for the thresholds. The latter supports all four ordinal families also implemented in \pkg{brms} as well as category specific effects but no group-specific effects.}, \pkg{brms} allows changes to this basic model in at least three ways. First of all, three additional ordinal families are implemented. Families \code{sratio} (stopping ratio) and \code{cratio} (continuation ratio) are so-called sequential models \citep{tutz1990}. Both are equivalent to each other for symmetric link functions such as \code{logit} but will differ for asymmetric ones such as \code{cloglog}. The fourth ordinal family is \code{acat} (adjacent category) also known as the partial credit model \citep{masters1982, andrich1978a}. Second, restrictions to the thresholds can be applied. By default, thresholds are ordered for family \code{cumulative} or are completely free to vary for the other families. This is indicated by argument \code{threshold = "flexible"} (default) in \code{brm}. Using \code{threshold = "equidistant"} forces the distance between two adjacent thresholds to be the same, that is $$\tau_k = \tau_1 + (k-1)\delta$$ for thresholds $\tau_k$ and distance $\delta$ (see also \citealp{andrich1978b}; \citealp{andrich1978a}; \citealp{andersen1977}). Third, the assumption that predictors have constant effects across categories may be relaxed for non-cumulative ordinal models \citep{vanderark2001, tutz2000} leading to category specific effects. For instance, variable \code{treat} may only have an impact on the decision between category 3 and 4, but not on the lower categories. Without using category specific effects, such a pattern would remain invisible. To illustrate all three modeling options at once, we fit a (hardly theoretically justified) stopping ratio model with equidistant thresholds and category specific effects for variable \code{treat} on which we apply an informative prior.
\begin{Sinput}
fit4 <- brm(formula = rating ~ period + carry + cs(treat) + (1|subject),
            data = inhaler, family = sratio, threshold = "equidistant",
            prior = set_prior("normal(-1,2)", coef = "treat"))
\end{Sinput}
Note that priors are defined on category specific effects in the same way as for other population-level effects.
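
Before fitting such a model, \code{get\_prior} may again be used to list all parameter classes and coefficients for which priors can be specified in this model; a brief sketch (output omitted):
\begin{Sinput}
R> get_prior(rating ~ period + carry + cs(treat) + (1|subject),
+            data = inhaler, family = sratio)
\end{Sinput}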
A model summary can be obtained in the same way as before: \begin{Sinput} R> summary(fit4, waic = TRUE) \end{Sinput} \begin{Soutput} Family: sratio (logit) Formula: rating ~ period + carry + cs(treat) + (1 | subject) Data: inhaler (Number of observations: 572) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 911.9 Group-Level Effects: ~subject (Number of levels: 286) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 1.05 0.23 0.56 1.5 648 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept[1] 0.72 0.13 0.48 0.99 2048 1 Intercept[2] 2.67 0.35 2.00 3.39 969 1 Intercept[3] 4.62 0.66 3.36 5.95 1037 1 period 0.25 0.18 -0.09 0.61 4000 1 carry -0.26 0.22 -0.70 0.17 1874 1 treat[1] -0.96 0.30 -1.56 -0.40 1385 1 treat[2] -0.65 0.49 -1.60 0.27 4000 1 treat[3] -2.65 1.21 -5.00 -0.29 4000 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat delta 1.95 0.32 1.33 2.6 1181 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} Trace and density plots of the model parameters as produced by \code{plot(fit4)} can be found in Figure~\ref{inhaler_plot}. We see that three intercepts (thresholds) and three effects of \code{treat} have been estimated, because a four-point scale was used for the ratings. The treatment effect seems to be strongest between category 3 and 4. At the same time, however, the credible interval is also much larger. In fact, the intervals of all three effects of \code{treat} are highly overlapping, which indicates that there is not enough evidence in the data to support category specific effects. On the bottom of the output, parameter \code{delta} specifies the distance between two adjacent thresholds and indeed the intercepts differ from each other by the magnitude of \code{delta}. \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{inhaler_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the inhaler model discussed in Section~\ref{software}.} \label{inhaler_plot} \end{figure} \section[Comparison]{Comparison between packages} Over the years, many \proglang{R} packages have been developed that implement MLMs, each being more or less general in their supported models. Comparing all of them to \pkg{brms} would be too extensive and barely helpful for the purpose of the present paper. Accordingly, we concentrate on a comparison with four packages. These are \pkg{lme4} \citep{bates2015} and \pkg{MCMCglmm} \citep{hadfield2010}, which are possibly the most general and widely applied \proglang{R} packages for MLMs, as well as \pkg{rstanarm} \citep{rstanarm2016} and \pkg{rethinking} \citep{mcelreath2016}, which are both based on \pkg{Stan}. As opposed to the other packages, \pkg{rethinking} was primarily written for teaching purposes and requires the user to specify the full model explicitly using its own simplified \pkg{BUGS}-like syntax thus helping users to better understand the models that are fitted to their data. Regarding model families, all five packages support the most common types such as linear and binomial models as well as Poisson models for count data. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. 
In addition, \pkg{brms} supports robust linear regression using Student's distribution, which is also implemented on a GitHub branch of \pkg{rstanarm}. \pkg{MCMCglmm} allows fitting multinomial models that are currently not available in the other packages. Generalizing classical MLMs, \pkg{brms} and \pkg{MCMCglmm} allow fiting zero-inflated and hurdle models dealing with excess zeros in the response. Furthermore, \pkg{brms} supports non-linear models similar to the \pkg{nlme} package \citep{nlme2016} providing great flexibility but also requiring more care to produce reasonable results. Another flexible model class are generalized additive mixed models \citep{hastie1990,wood2011,zuur2014}, which can be fitted with \pkg{brms} and \pkg{rstanarm}. In all five packages, there are quite a few additional modeling options. Variable link functions can be specified in all packages except for \pkg{MCMCglmm}, in which only one link is available per family. \pkg{MCMCglmm} generally supports multivariate responses using data in wide format, whereas \pkg{brms} currently only offers this option for families \code{gaussian} and \code{student}. It should be noted that it is always possible to transform data from wide to long format for compatibility with the other packages. Autocorrelation of the response can only be fitted in \pkg{brms}, which supports auto-regressive as well as moving-average effects. For ordinal models in \pkg{brms}, effects of predictors may vary across different levels of the response as explained in the inhaler example. A feature currently exclusive to \pkg{rethinking} is the possibility to impute missing values in the predictor variables. Information criteria are available in all three packages. The advantage of WAIC and LOO implemented in \pkg{brms}, \pkg{rstanarm}, and \pkg{rethinking} is that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the Bayesian packages, \pkg{brms} and \pkg{rethinking} offer a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, we believe that the way priors are specified in \pkg{brms} and \pkg{rethinking} is more intuitive as it is directly evident what prior is actually applied. A more detailed comparison of the packages can be found in Table~\ref{comparison1} and Table~\ref{comparison2}. To facilitate the understanding of the model formulation in \pkg{brms}, Table~\ref{syntax} shows \pkg{lme4} function calls to fit sample models along with the equivalent \pkg{brms} syntax. So far the focus was only on capabilities. Another important topic is speed, especially for more complex models. Of course, \pkg{lme4} is usually much faster than the other packages as it uses maximum likelihood methods instead of MCMC algorithms, which are slower by design. To compare the efficiency of the four Bayesian packages, we fitted multilevel models on real data sets using the minimum effective sample size divided by sampling time as a measure of sampling efficiency. One should always aim at running multiple chains as one cannot be sure that a single chain really explores the whole posterior distribution. However, as \pkg{MCMCglmm} does not come with a built-in option to run multiple chains, we used only a single chain to fit the models after making sure that it leads to the same results as multiple chains. 
The \proglang{R} code allowing to replicate the results is available as supplemental material. The first thing that becomes obvious when fitting the models is that \pkg{brms} and \pkg{rethinking} need to compile the \proglang{C++} model before actually fitting it, because the \pkg{Stan} code being parsed to \proglang{C++} is generated on the fly based on the user's input. Compilation takes about a half to one minute depending on the model complexity and computing power of the machine. This is not required by \pkg{rstanarm} and \pkg{MCMCglmm}, although the former is also based on \pkg{Stan}, as compilation takes place only once at installation time. While the latter approach saves the compilation time, the former is more flexible when it comes to model specification. For small and simple models, compilation time dominates the overall computation time, but for larger and more complex models, sampling will take several minutes or hours so that one minute more or less will not really matter, anymore. Accordingly, the following comparisons do not include the compilation time. In models containing only group-specific intercepts, \pkg{MCMCglmm} is usually more efficient than the \pkg{Stan} packages. However, when also estimating group-specific slopes, \pkg{MCMCglmm} falls behind the other packages and quite often refuses to sample at all unless one carefully specifies informative priors. Note that these results are obtained by running only a single chain. For all three \pkg{Stan} packages, sampling efficiency can easily be increased by running multiple chains in parallel. Comparing the \pkg{Stan} packages to each other, \pkg{brms} is usually most efficient for models with group-specific terms, whereas \pkg{rstanarm} tends to be roughly $50\%$ to $75\%$ as efficient at least for the analyzed data sets. The efficiency of \pkg{rethinking} is more variable depending on the model formulation and data, sometimes being slightly ahead of the other two packages, but usually being considerably less efficient. Generally, \pkg{rethinking} loses efficiency for models with many population-level effects presumably because one cannot use design matrices and vectorized prior specifications for population-level parameters. Note that it was not possible to specify the exact same priors across packages due to varying parameterizations. Of course, efficiency depends heavily on the model, chosen priors, and data at hand so that the present results should not be over-interpreted. 
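
Purely as an illustration of this efficiency measure (and not the benchmark code from the supplemental material), the minimum effective sample size per second of sampling could be computed for a single fitted \pkg{brms} model along the following lines, assuming the model was fitted through \pkg{rstan}; the helper function \code{eff\_per\_sec} is made up for this sketch:
\begin{Sinput}
# minimum effective sample size per second of pure sampling time
eff_per_sec <- function(fit) {
  sf <- fit$fit  # underlying 'stanfit' object of the brmsfit
  n_eff <- summary(sf)$summary[, "n_eff"]
  sampling_time <- sum(rstan::get_elapsed_time(sf)[, "sample"])
  min(n_eff, na.rm = TRUE) / sampling_time
}
\end{Sinput}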
\begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{lme4}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & yes \\ Multinomial models & no & no & yes \\ Count data models & yes & yes & yes \\ Survival models & yes$^1$ & yes & yes \\ Ordinal models & various & no & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ Generalized additive models & yes & no & no \\ Non-linear models & yes & no & no \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Multivariate responses & limited & no & yes \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & yes \\ Censored data & yes & no & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & -- & no \\ population-level priors & flexible & --$^3$ & normal \\ group-level priors & normal & --$^3$ & normal \\ covariance priors & flexible & --$^3$ & restricted$^4$ \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & ML, REML & MH, Gibbs$^2$ \\ Information criterion & WAIC, LOO & AIC, BIC & DIC \\ \proglang{C++} compiler required & yes & no & no \\ Modularized & no & yes & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{lme4} and \pkg{MCMCglmm} package. Notes: (1) Weibull family only available in \pkg{brms}. (2) Estimator consists of a combination of both algorithms. (3) Priors may be imposed using the \pkg{blme} package \citep{chung2013}. 
(4) For details see \cite{hadfield2010}.} \label{comparison1} \end{table} \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{rethinking}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & yes$^1$ & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & no \\ Multinomial models & no & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes$^2$ & yes & yes \\ Ordinal models & various & cumulative$^3$ & no \\ Zero-inflated and hurdle models & yes & no & no \\ Generalized additive models & yes & yes & no \\ Non-linear models & yes & no & limited$^4$ \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & various \\ Weights & yes & yes & no \\ Offset & yes & yes & yes \\ Multivariate responses & limited & no & no \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & no \\ Censored data & yes & no & no \\ Truncated data & yes & no & yes \\ Customized covariances & yes & no & no \\ Missing value imputation & no & no & yes \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & yes & yes \\ population-level priors & flexible & normal, Student-t & flexible \\ group-level priors & normal & normal & normal \\ covariance priors & flexible & restricted$^5$ & flexible \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & HMC, NUTS \\ Information criterion & WAIC, LOO & AIC, LOO & AIC, LOO \\ \proglang{C++} compiler required & yes & no & yes \\ Modularized & no & no & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{rethinking} package. Notes: (1) Currently only implemented on a branch on GitHub. (2) Weibull family only available in \pkg{brms}. (3) No group-level terms allowed. (4) The parser is mainly written for linear models but also accepts some non-linear model specifications. 
(5) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}.} \label{comparison2} \end{table} \begin{table}[hbtp] \centering %\renewcommand{\arraystretch}{2} \begin{tabular}{ll} Dataset & \parbox{10cm}{Function call} \\ \hline \\ [-1.5ex] \parbox{2cm}{cake} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{5ex} data = cake)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{4ex} data = cake)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{sleepstudy} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [1.5ex] \pkg{brms} & \parbox{13cm}{\code{brm(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{cbpp$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(cbind(incidence, size - incidence) $\sim$ period + (1 | herd), \\ \hspace*{6ex} family = binomial("logit"), data = cbpp)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(incidence | trials(size) $\sim$ period + (1 | herd), \\ \hspace*{4ex} family = binomial("logit"), data = cbpp)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{grouseticks$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{6ex} family = poisson("log"), data = grouseticks)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{4ex} family = poisson("log"), data = grouseticks)}} \\ [2ex] \hline \\ [-1ex] \parbox{2cm}{VerbAgg$^2$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{6ex} + (1|item), family = binomial, data = VerbAgg)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{4ex} + (1|item), family = bernoulli, data = VerbAgg)}} \\ [2ex] \hline \\ [-1.5ex] \end{tabular} \caption{Comparison of the model syntax of \pkg{lme4} and \pkg{brms} using data sets included in \pkg{lme4}. Notes: (1) Default links are used to that the link argument may be omitted. (2) Fitting this model takes some time. A proper prior on the population-level effects (e.g., \code{prior = set\_prior("normal(0,5)")}) may help in increasing sampling speed.} \label{syntax} \end{table} \section{Conclusion} The present paper is meant to provide a general overview on the \proglang{R} package \pkg{brms} implementing MLMs using the probabilistic programming language \pkg{Stan} for full Bayesian inference. Although only a small selection of the modeling options available in \pkg{brms} are discussed in detail, I hope that this article can serve as a good starting point to further explore the capabilities of the package. For the future, I have several plans on how to improve the functionality of \pkg{brms}. I want to include multivariate models that can handle multiple response variables coming from different distributions as well as new correlation structures for instance for spatial data. Similarily, distributional regression models as well as mixture response distributions appear to be valuable extensions of the package. I am always grateful for any suggestions and ideas regarding new features. 
\section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language \pkg{Stan}, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Two anonymous reviewers provided very detailed and thoughtful suggestions to substantially improve both the package and the paper. Furthermore, Prof. Philipp Doebler and Prof. Heinz Holling have given valuable feedback on earlier versions of the paper. Lastly, I want to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_overview} \end{document} brms/vignettes/brms_multilevel.ltx0000644000175000017500000016721713701270367017341 0ustar nileshnilesh\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Multilevel Models with brms} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{Advanced Bayesian Multilevel Modeling with the \proglang{R} Package \pkg{brms}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{Advanced Bayesian Multilevel Modeling with the R Package brms} %% without formatting \Shorttitle{Advanced Bayesian Multilevel Modeling with \pkg{brms}} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package allows R users to easily specify a wide range of Bayesian single-level and multilevel models, which are fitted with the probabilistic programming language \proglang{Stan} behind the scenes. Several response distributions are supported, of which all parameters (e.g., location, scale, and shape) can be predicted at the same time thus allowing for distributional regression. Non-linear relationships may be specified using non-linear predictor terms or semi-parametric approaches such as splines or Gaussian processes. Multivariate models, in which each response variable can be predicted using the above mentioned options, can be fitted as well. To make all of these modeling options possible in a multilevel framework, \pkg{brms} provides an intuitive and powerful formula syntax, which extends the well known formula syntax of \pkg{lme4}. The purpose of the present paper is to introduce this syntax in detail and to demonstrate its usefulness with four examples, each showing other relevant aspects of the syntax. If you use \pkg{brms}, please cite this article as published in the R Journal \citep{brms2}. 
} \Keywords{Bayesian inference, multilevel models, distributional regression, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel models, distributional regression, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for R have been developed to fit MLMs. Usually, however, the functionality of these implementations is limited insofar as it is only possible to predict the mean of the response distribution. Other parameters of the response distribution, such as the residual standard deviation in linear models, are assumed constant across observations, which may be violated in many applications. Accordingly, it is desirable to allow for prediction of \emph{all} response parameters at the same time. Models doing exactly that are often referred to as \emph{distributional models} or more verbosely \emph{models for location, scale and shape} \citep{rigby2005}. Another limitation of basic MLMs is that they only allow for linear predictor terms. While linear predictor terms already offer good flexibility, they are of limited use when relationships are inherently non-linear. Such non-linearity can be handled in at least two ways: (1) by fully specifying a non-linear predictor term with corresponding parameters each of which can be predicted using MLMs \citep{lindstrom1990}, or (2) estimating the form of the non-linear relationship on the fly using splines \citep{wood2004} or Gaussian processes \citep{rasmussen2006}. The former are often simply called \emph{non-linear models}, while models applying splines are referred to as \emph{generalized additive models} (GAMs; \citeauthor{hastie1990}, \citeyear{hastie1990}). Combining all of these modeling options into one framework is a complex task, both conceptually and with regard to model fitting. Maximum likelihood methods, which are typically applied in classical 'frequentist' statistics, can reach their limits at some point and fully Bayesian methods become the go-to solutions to fit such complex models \citep{gelman2014}. 
In addition to being more flexible, the Bayesian framework comes with other advantages, for instance, the ability to derive probability statements for every quantity of interest or to explicitly incorporate prior knowledge about parameters into the model. The former is particularly relevant in non-linear models, for which classical approaches struggle more often than not in propagating all the uncertainty in the parameter estimates to non-linear functions such as out-of-sample predictions. Possibly the most powerful program for performing full Bayesian inference available to date is Stan \citep{stanM2017, carpenter2017}. It implements Hamiltonian Monte Carlo \citep{duane1987, neal2011, betancourt2014} and its extension, the No-U-Turn (NUTS) Sampler \citep{hoffman2014}. These algorithms converge much more quickly than other Markov-Chain Monte-Carlo (MCMC) algorithms, especially for high-dimensional models \citep{hoffman2014, betancourt2014, betancourt2017}. An excellent non-mathematical introduction to Hamiltonian Monte Carlo can be found in \citet{betancourt2017}. Stan comes with its own programming language, allowing for great modeling flexibility \citep{stanM2017, carpenter2017}. Many researchers may still be hesitant to use Stan directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error-prone process even for researchers familiar with Bayesian inference. The \pkg{brms} package \citep{brms1}, presented in this paper, aims to remove these hurdles for a wide range of regression models by allowing the user to benefit from the merits of Stan through an extended \pkg{lme4}-like \citep{bates2015} formula syntax, with which many R users are familiar. It offers much more than writing efficient and human-readable Stan code: \pkg{brms} comes with many post-processing and visualization functions, for instance to perform posterior predictive checks, leave-one-out cross-validation, visualization of estimated effects, and prediction of new data. The overarching aim is to have one general framework for regression modeling, which offers everything required to successfully apply regression models to complex data. To date, it already replaces and extends the functionality of dozens of other R packages, each of which is restricted to specific regression models\footnote{Unfortunately, due to the implementation via Stan, it is not easily possible for users to define their own response distributions and run them via \pkg{brms}. If you feel that a response distribution is missing in \pkg{brms}, please open an issue on GitHub (\url{https://github.com/paul-buerkner/brms}).}. The purpose of the present article is to provide an introduction to the advanced multilevel formula syntax implemented in \pkg{brms}, which allows users to fit a wide and growing range of non-linear distributional multilevel models. A general overview of the package is already given in \citet{brms1}. Accordingly, the present article focuses on more recent developments. We begin by explaining the underlying structure of distributional models. Next, the formula syntax of \pkg{lme4} and its extensions implemented in \pkg{brms} are explained. Four examples that demonstrate the use of the new syntax are discussed in detail. Afterwards, the functionality of \pkg{brms} is compared with that of \pkg{rstanarm} \citep{rstanarm2017} and \pkg{MCMCglmm} \citep{hadfield2010}. We end by describing future plans for extending the package.
\section{Model description} \label{model} The core of models implemented in \pkg{brms} is the prediction of the response $y$ through predicting all parameters $\theta_p$ of the response distribution $D$, which is also called the model \code{family} in many R packages. We write $$y_i \sim D(\theta_{1i}, \theta_{2i}, ...)$$ to stress the dependency on the $i\textsuperscript{th}$ observation. Every parameter $\theta_p$ may be regressed on its own predictor term $\eta_p$ transformed by the inverse link function $f_p$, that is, $\theta_{pi} = f_p(\eta_{pi})$\footnote{A parameter can also be assumed constant across observations so that a linear predictor is not required.}. Such models are typically referred to as \emph{distributional models}\footnote{The models described in \citet{brms1} are a sub-class of the models described here.}. Details about the parameterization of each \code{family} are given in \code{vignette("brms\_families")}. Suppressing the index $p$ for simplicity, a predictor term $\eta$ can generally be written as $$ \eta = \mathbf{X} \beta + \mathbf{Z} u + \sum_{k = 1}^K s_k(x_k) $$ In this equation, $\beta$ and $u$ are the coefficients at the population level and group level, respectively, and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The terms $s_k(x_k)$ symbolize optional smooth functions of unspecified form based on covariates $x_k$ fitted via splines (see \citet{wood2011} for the underlying implementation in the \pkg{mgcv} package) or Gaussian processes \citep{williams1996}. The response $y$ as well as $\mathbf{X}$, $\mathbf{Z}$, and $x_k$ make up the data, whereas $\beta$, $u$, and the smooth functions $s_k$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects, but I avoid these terms following the recommendations of \citet{gelmanMLM2006}. Details about prior distributions of $\beta$ and $u$ can be found in \citet{brms1} and under \code{help("set\_prior")}. As an alternative to the strictly additive formulation described above, predictor terms may also have any form specifiable in Stan. We call such a term a \emph{non-linear} predictor and write $$\eta = f(c_1, c_2, ..., \phi_1, \phi_2, ...)$$ The structure of the function $f$ is given by the user, $c_r$ are known or observed covariates, and $\phi_s$ are non-linear parameters each having its own linear predictor term $\eta_{\phi_s}$ of the form specified above. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves. A frequentist implementation of such models, which inspired the non-linear syntax in \pkg{brms}, can be found in the \pkg{nlme} package \citep{nlme2016}. \section{Extended multilevel formula syntax} \label{formula_syntax} The formula syntax applied in \pkg{brms} builds upon the syntax of the R package \pkg{lme4} \citep{bates2015}. First, we will briefly explain the \pkg{lme4} syntax used to specify multilevel models and then introduce certain extensions that allow specifying much more complicated models in \pkg{brms}. An \pkg{lme4} formula has the general form \begin{Sinput} response ~ pterms + (gterms | group) \end{Sinput} The \code{pterms} part contains the population-level effects that are assumed to be the same across observations. The \code{gterms} part contains so-called group-level effects that are assumed to vary across grouping variables specified in \code{group}. Multiple grouping factors each with multiple group-level effects are possible.
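As a minimal illustration of this general form (a sketch that assumes the \pkg{lme4} package is installed so that its \code{sleepstudy} data set is available), such a formula can be passed directly to \code{brm}: \begin{Sinput}
# varying intercept and varying slope of Days over Subject
library(brms)
data("sleepstudy", package = "lme4")
fit_sleep <- brm(Reaction ~ Days + (Days | Subject), data = sleepstudy)
\end{Sinput} Here, \code{Days} enters as a population-level effect, while \code{(Days | Subject)} adds a varying intercept and a varying slope of \code{Days} over the grouping factor \code{Subject}, with their correlation estimated as well.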
Usually, \code{group} contains only a single variable name pointing to a factor, but you may also use \code{g1:g2} or \code{g1/g2}, if both \code{g1} and \code{g2} are suitable grouping factors. The \code{:} operator creates a new grouping factor that consists of the combined levels of \code{g1} and \code{g2} (you could think of this as pasting the levels of both factors together). The \code{/} operator indicates nested grouping structures and expands one grouping factor into two or more when using multiple \code{/} within one term. If, for instance, you write \code{(1 | g1/g2)}, it will be expanded to \code{(1 | g1) + (1 | g1:g2)}. Instead of \code{|} you may use \code{||} in grouping terms to prevent group-level correlations from being modeled. This may be useful in particular when modeling so many group-level effects that convergence of the fitting algorithms becomes an issue due to model complexity. One limitation of the \code{||} operator in \pkg{lme4} is that it only splits up terms so that columns of the design matrix originating from the same term are still modeled as correlated (e.g., when coding a categorical predictor; see the \code{mixed} function of the \pkg{afex} package by \citet{afex2015} for a way to avoid this behavior). While intuitive and visually appealing, the classic \pkg{lme4} syntax is not flexible enough to allow for specifying the more complex models supported by \pkg{brms}. In non-linear or distributional models, for instance, multiple parameters are predicted, each having its own population-level and group-level effects. Hence, multiple formulas are necessary to specify such models\footnote{Actually, it is possible to specify multiple model parts within one formula using interaction terms, for instance as implemented in \pkg{MCMCglmm} \citep{hadfield2010}. However, this syntax is limited in flexibility and requires a rather deep understanding of the way R parses formulas, thus often being confusing to users.}. Then, however, specifying group-level effects of the same grouping factor to be correlated \emph{across} formulas becomes complicated. The solution implemented in \pkg{brms} (and currently unique to it) is to expand the \code{|} operator into \code{|ID|}, where \code{ID} can be any value. Group-level terms with the same \code{ID} will then be modeled as correlated if they share the same grouping factor(s)\footnote{It might even be further extended to \code{|fun(ID)|}, where \code{fun} defines the type of correlation structure, defaulting to an unstructured correlation matrix, that is, estimating the full correlation matrix. The \code{fun} argument is not yet supported by \pkg{brms} but could be supported in the future if other correlation structures, such as compound symmetry or Toeplitz, turn out to have reasonable practical applications and effective implementations in Stan.}. For instance, if the terms \code{(x1|ID|g1)} and \code{(x2|ID|g1)} appear somewhere in the same or different formulas passed to \pkg{brms}, they will be modeled as correlated. Further extensions of the classical \pkg{lme4} syntax refer to the \code{group} part. It is rather limited in its flexibility since only variable names combined by \code{:} or \code{/} are supported. We propose two extensions of this syntax: Firstly, \code{group} can generally be split up into its terms so that, say, \code{(1 | g1 + g2)} is expanded to \code{(1 | g1) + (1 | g2)}. This is fully consistent with the way \code{/} is handled so it provides a natural generalization to the existing syntax.
Secondly, there are some special grouping structures that cannot be expressed by simply combining grouping variables. For instance, multi-membership models cannot be expressed this way. To overcome this limitation, we propose wrapping terms in \code{group} within special functions that allow specifying alternative grouping structures: \code{(gterms | fun(group))}. In \pkg{brms}, there are currently two such functions implemented, namely \code{gr} for the default behavior and \code{mm} for multi-membership terms. To be compatible with the original syntax and to keep formulas short, \code{gr} is automatically added internally if none of these functions is specified. While some non-linear relationships, such as quadratic relationships, can be expressed within the basic R formula syntax, other more complicated ones cannot. For this reason, it is possible in \pkg{brms} to fully specify non-linear predictor terms similar to how it is done in \pkg{nlme}, but fully compatible with the extended multilevel syntax described above. Suppose, for instance, we want to model the non-linear growth curve $$ y = b_1 \left(1 - \exp\left(-(x / b_2)^{b_3}\right)\right) $$ between $y$ and $x$ with parameters $b_1$, $b_2$, and $b_3$ (see Example 3 in this paper for an implementation of this model with real data). Furthermore, we want all three parameters to vary by a grouping variable $g$ and model those group-level effects as correlated. Additionally, $b_1$ should be predicted by a covariate $z$. We can express this in \pkg{brms} using multiple formulas, one for the non-linear model itself and one per non-linear parameter: \begin{Sinput} y ~ b1 * (1 - exp(-(x / b2) ^ b3)) b1 ~ z + (1|ID|g) b2 ~ (1|ID|g) b3 ~ (1|ID|g) \end{Sinput} The first formula will not be evaluated using standard R formula parsing, but instead taken literally. In contrast, the formulas for the non-linear parameters will be evaluated in the usual way and are compatible with all terms supported by \pkg{brms}. Note that we have used the above described ID-syntax to model group-level effects as correlated across formulas. There are other syntax extensions implemented in \pkg{brms} that do not directly target grouping terms. Firstly, there are terms formally included in the \code{pterms} part that are handled separately. The most prominent examples are smooth terms specified through the \code{s} and \code{t2} functions of the \pkg{mgcv} package \citep{wood2011}. Other examples are category specific effects \code{cs}, monotonic effects \code{mo}, noise-free effects \code{me}, or Gaussian process terms \code{gp}. The first of these is explained in \citet{brms1}, while the latter three are documented in \code{help(brmsformula)}. Internally, these terms are extracted from \code{pterms} and not included in the construction of the population-level design matrix. Secondly, making use of the fact that \code{|} is unused on the left-hand side of $\sim$ in a formula, additional information on the response variable may be specified via \begin{Sinput} response | aterms ~ pterms + (gterms | group) \end{Sinput} The \code{aterms} part may contain multiple terms of the form \code{fun(value)} separated by \code{+}, each providing special information on the response variable. This allows, among other things, weighting observations, providing known standard errors for meta-analysis, or modeling censored or truncated data. As it is not the main topic of the present paper, we refer to \code{help("brmsformula")} and \code{help("addition-terms")} for more details.
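To make the \code{aterms} part a little more tangible, the following is a small, purely illustrative sketch (the data set \code{dat\_ma} and the variable names \code{yi}, \code{sei}, and \code{study} are invented for this example) of a random-effects meta-analysis in which known standard errors are passed via \code{se}: \begin{Sinput}
# simulate a small hypothetical meta-analytic data set
dat_ma <- data.frame(
  yi = rnorm(20, mean = 0.3, sd = 0.2),  # observed effect sizes
  sei = runif(20, 0.1, 0.3),             # known standard errors
  study = factor(1:20)
)
# se(sei) passes the known standard errors to the model, while the
# varying intercept over study captures between-study heterogeneity
fit_ma <- brm(yi | se(sei) ~ 1 + (1 | study), data = dat_ma)
\end{Sinput} Other addition terms, such as \code{weights}, \code{cens}, or \code{trunc}, are specified in the same way on the left-hand side of the formula.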
To set up the model formulas and combine them into one object, \pkg{brms} defines the \code{brmsformula} (or short \code{bf}) function. Its output can then be passed to the \code{parse\_bf} function, which splits up the formulas in separate parts and prepares them for the generation of design matrices and related data. Other packages may re-use these functions in their own routines making it easier to offer support for the above described multilevel syntax. \section{Examples} The idea of \pkg{brms} is to provide one unified framework for multilevel regression models in R. As such, the above described formula syntax in all of its variations can be applied in combination with all response distributions supported by \pkg{brms} (currently about 35 response distributions are supported; see \code{help("brmsfamily")} and \code{vignette("brms\_families")} for an overview). In this section, we will discuss four examples in detail, each focusing on certain aspects of the syntax. They are chosen to provide a broad overview of the modeling options. The first is about the number of fish caught be different groups of people. It does not actually contain any multilevel structure, but helps in understanding how to set up formulas for different model parts. The second example is about housing rents in Munich. We model the data using splines and a distributional regression approach. The third example is about cumulative insurance loss payments across several years, which is fitted using a rather complex non-linear multilevel model. Finally, the fourth example is about the performance of school children, who change school during the year, thus requiring a multi-membership model. Despite not being covered in the four examples, there are a few more modeling options that we want to briefly describe. First, \pkg{brms} allows fitting so called phylogenetic models. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. Species are not independent as they come from the same phylogenetic tree, implying that different levels of the same grouping-factor (i.e., species) are likely correlated. There is a whole vignette dedicated to this topic, which can be found via \code{vignette("brms\_phylogenetics")}. Second, there is a canonical way to handle ordinal predictors, without falsely assuming they are either categorical or continuous. We call them monotonic effects and discuss them in \code{vignette("brms\_monotonic")}. Last but not least, it is possible to account for measurement error in both response and predictor variables. This is often ignored in applied regression modeling \citep{westfall2016}, although measurement error is very common in all scientific fields making use of observational data. There is no vignette yet covering this topic, but one will be added in the future. In the meantime, \code{help("brmsformula")} is the best place to start learning about such models as well as about other details of the \pkg{brms} formula syntax. \subsection{Example 1: Catching fish} An important application of the distributional regression framework of \pkg{brms} are so called zero-inflated and hurdle models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. Here, we consider an example dealing with the number of fish caught by various groups of people. 
On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: ``The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.'' \begin{Sinput} zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") zinb$camper <- factor(zinb$camper, labels = c("no", "yes")) head(zinb) \end{Sinput} \begin{Sinput} nofish livebait camper persons child xb zg count 1 1 0 no 1 0 -0.8963146 3.0504048 0 2 0 1 yes 1 0 -0.5583450 1.7461489 0 3 0 1 no 1 0 -0.4017310 0.2799389 0 4 0 1 yes 2 1 -0.9562981 -0.6015257 0 5 0 1 no 1 0 0.4368910 0.5277091 1 6 0 1 yes 4 2 1.3944855 -0.7075348 0 \end{Sinput} As predictors we choose the number of people per group, the number of children, as well as whether or not the group consists of campers. Many groups may not catch any fish just because they do not try and so we fit a zero-inflated Poisson model. For now, we assume a constant zero-inflation probability across observations. \begin{Sinput} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson("log")) \end{Sinput} The model is readily summarized via \begin{Sinput} summary(fit_zinb1) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.01 0.17 -1.34 -0.67 2171 1 persons 0.87 0.04 0.79 0.96 2188 1 child -1.36 0.09 -1.55 -1.18 1790 1 camper 0.80 0.09 0.62 0.98 2950 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat zi 0.41 0.04 0.32 0.49 2409 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} A graphical summary is available through \begin{Sinput} conditional_effects(fit_zinb1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_zinb1.pdf} \caption{Conditional effects plots of the \code{fit\_zinb1} model.} \label{me_zinb1} \end{figure} (see Figure \ref{me_zinb1}). In fact, the \code{conditional\_effects} method turned out to be so powerful in visualizing effects of predictors that I am using it almost as frequently as \code{summary}. According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability \code{zi} is pretty large with a mean of 41\%. Please note that the probability of catching no fish is actually higher than 41\%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-\emph{inflation}). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). 
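As a quick, optional check (a sketch that is not part of the original analysis), one can ask whether the model reproduces the large number of zeros in the data by means of a posterior predictive check: \begin{Sinput}
# posterior predictive check of the proportion of zeros in the data
prop_zero <- function(y) mean(y == 0)
pp_check(fit_zinb1, type = "stat", stat = "prop_zero")
\end{Sinput} If the model is appropriate, the observed proportion of zeros should fall well within the distribution of proportions computed from the posterior predictive draws.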
Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish, since children often lack the patience required for fishing. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. \begin{Sinput} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) \end{Sinput} To transform the linear predictor of \code{zi} into a probability, \pkg{brms} applies the logit-link, which takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. \begin{Sinput} summary(fit_zinb2) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper zi ~ child Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.07 0.18 -1.43 -0.73 2322 1 persons 0.89 0.05 0.80 0.98 2481 1 child -1.17 0.10 -1.37 -1.00 2615 1 camper 0.78 0.10 0.60 0.96 3270 1 zi_Intercept -0.95 0.27 -1.52 -0.48 2341 1 zi_child 1.21 0.28 0.69 1.79 2492 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} According to the model, trying to fish with children not only decreases the overall number fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), possibly because groups with more children spend less time or no time at all fishing. Comparing model fit via leave-one-out cross validation as implemented in the \pkg{loo} package \citep{loo2016, vehtari2016}. \begin{Sinput} LOO(fit_zinb1, fit_zinb2) \end{Sinput} \begin{Sinput} LOOIC SE fit_zinb1 1639.52 363.30 fit_zinb2 1621.35 362.39 fit_zinb1 - fit_zinb2 18.16 15.71 \end{Sinput} reveals that the second model using the number of children to predict both model parts has better fit. However, when considering the standard error of the \code{LOOIC} difference, improvement in model fit is apparently modest and not substantial. More examples of distributional model can be found in \code{vignette("brms\_distreg")}. \subsection{Example 2: Housing rents} In their book about regression modeling, \citet{fahrmeir2013} use an example about the housing rents in Munich from 1999. The data contains information about roughly 3000 apartments including among others the absolute rent (\code{rent}), rent per square meter (\code{rentsqm}), size of the apartment (\code{area}), construction year (\code{yearc}), and the district in Munich (\code{district}), where the apartment is located. 
The data can be found in the \pkg{gamlss.data} package \citep{gamlss.data}: \begin{Sinput} data("rent99", package = "gamlss.data") head(rent99) \end{Sinput} \begin{Sinput} rent rentsqm area yearc location bath kitchen cheating district 1 109.9487 4.228797 26 1918 2 0 0 0 916 2 243.2820 8.688646 28 1918 2 0 0 1 813 3 261.6410 8.721369 30 1918 1 0 0 1 611 4 106.4103 3.547009 30 1918 2 0 0 0 2025 5 133.3846 4.446154 30 1918 2 0 0 1 561 6 339.0256 11.300851 30 1918 2 0 0 1 541 \end{Sinput} Here, we aim at predicting the rent per square meter with the size of the apartment as well as the construction year, while taking the district of Munich into account. As the effect of both predictors on the rent is of unknown non-linear form, we model these variables using a bivariate tensor spline \citep{wood2013}. The district is accounted for via a varying intercept. \begin{Sinput} fit_rent1 <- brm(rentsqm ~ t2(area, yearc) + (1|district), data = rent99, chains = 2, cores = 2) \end{Sinput} We fit the model using just two chains (instead of the default of four chains) on two processor cores to reduce the model fitting time for the purpose of the present paper. In general, using the default option of four chains (or more) is recommended. \begin{Sinput} summary(fit_rent1) \end{Sinput} \begin{Sinput} Family: gaussian(identity) Formula: rentsqm ~ t2(area, yearc) + (1 | district) Data: rent99 (Number of observations: 3082) Samples: 2 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 2000 ICs: LOO = NA; WAIC = NA; R2 = NA Smooth Terms: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sds(t2areayearc_1) 4.93 2.32 1.61 10.77 1546 1.00 sds(t2areayearc_2) 5.78 2.87 1.58 13.15 1175 1.00 sds(t2areayearc_3) 8.09 3.19 3.66 16.22 1418 1.00 Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.48 0.73 494 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 7.80 0.11 7.59 8.02 2000 1.00 t2areayearc_1 -1.00 0.09 -1.15 -0.83 2000 1.00 t2areayearc_2 0.75 0.17 0.43 1.09 2000 1.00 t2areayearc_3 -0.07 0.16 -0.40 0.24 1579 1.00 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.95 0.03 1.90 2.01 2000 1.00 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} For models including splines, the output of \code{summary} is not tremendously helpful, but we get at least some information. Firstly, the credible intervals of the standard deviations of the coefficients forming the splines (under \code{'Smooth Terms'}) are sufficiently far away from zero to indicate non-linearity in the (combined) effect of \code{area} and \code{yearc}. Secondly, even after controlling for these predictors, districts still vary with respect to rent per square meter by a sizable amount as visible under \code{'Group-Level Effects'} in the output. To further understand the effect of the predictor, we apply graphical methods: \begin{Sinput} conditional_effects(fit_rent1, surface = TRUE) \end{Sinput} In Figure \ref{me_rent1}, the conditional effects of both predictors are displayed, while the respective other predictor is fixed at its mean. In Figure \ref{me_rent2}, the combined effect is shown, clearly demonstrating an interaction between the variables. 
In particular, housing rents appear to be highest for small and relatively new apartments. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent1.pdf} \caption{Conditional effects plots of the \code{fit\_rent1} model for single predictors.} \label{me_rent1} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_rent2.pdf} \caption{Surface plot of the \code{fit\_rent1} model for the combined effect of \code{area} and \code{yearc}.} \label{me_rent2} \end{figure} In the above example, we only considered the mean of the response distribution to vary by \code{area} and \code{yearc}, but this may not necessarily be a reasonable assumption, as the variation of the response might vary with these variables as well. Accordingly, we fit splines and effects of district for both the location and the scale parameter, which is called \code{sigma} in Gaussian models. \begin{Sinput} bform <- bf(rentsqm ~ t2(area, yearc) + (1|ID1|district), sigma ~ t2(area, yearc) + (1|ID1|district)) fit_rent2 <- brm(bform, data = rent99, chains = 2, cores = 2) \end{Sinput} If not otherwise specified, \code{sigma} is predicted on the log-scale to ensure it is positive no matter what the predictor term looks like. Instead of \code{(1|district)} as in the previous model, we now use \code{(1|ID1|district)} in both formulas. This results in modeling the varying intercepts of both model parts as correlated (see the description of the ID-syntax above). The group-level part of the \code{summary} output looks as follows: \begin{Sinput} Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.49 0.73 744 1.00 sd(sigma_Intercept) 0.11 0.02 0.06 0.15 751 1.00 cor(Intercept,sigma_Intercept) 0.72 0.17 0.35 0.98 648 1.00 \end{Sinput} As visible from the positive correlation of the intercepts, districts with an overall higher rent per square meter also show higher variation at the same time. Lastly, we want to turn our attention to the splines. While \code{conditional\_effects} is used to visualize effects of predictors on the expected response, \code{conditional\_smooths} is used to show just the spline parts of the model: \begin{Sinput} conditional_smooths(fit_rent2) \end{Sinput} The plot on the left-hand side of Figure \ref{me_rent3} resembles the one in Figure \ref{me_rent2}, but the scale is different since only the spline is plotted. The right-hand side of Figure \ref{me_rent3} shows the spline for \code{sigma}. Since we apply the log-link on \code{sigma} by default, the spline is on the log-scale as well. As visible in the plot, the variation in the rent per square meter is highest for relatively small and old apartments, while the variation is smallest for medium to large apartments built around the 1960s. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent3.pdf} \caption{Plots showing the smooth terms of the \code{fit\_rent2} model.} \label{me_rent3} \end{figure} \subsection{Example 3: Insurance loss payments} On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originating from different origin years (see \url{http://www.magesblog.com/2015/11/loss-developments-via-growth-curves-and.html}). We will use a slightly simplified version of his model for demonstration purposes here.
It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss for each accident year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are for now assumed to be the same across years. We load the data \begin{Sinput} url <- paste0("https://raw.githubusercontent.com/mages/", "diesunddas/master/Data/ClarkTriangle.csv") loss <- read.csv(url) head(loss) \end{Sinput} \begin{Sinput} AY dev cum 1 1991 6 357.848 2 1991 18 1124.788 3 1991 30 1735.330 4 1991 42 2182.708 5 1991 54 2745.596 6 1991 66 3319.994 \end{Sinput} and translate the proposed model into a non-linear \pkg{brms} model. \begin{Sinput} nlform <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE) nlprior <- c(prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta")) fit_loss1 <- brm(formula = nlform, data = loss, family = gaussian(), prior = nlprior, control = list(adapt_delta = 0.9)) \end{Sinput} In the above function calls, quite a few things are going on. The formulas are wrapped in \code{bf} to combine them into one object. The first formula specifies the non-linear model. We set the argument \code{nl = TRUE} so that \pkg{brms} takes this formula literally, instead of using standard R formula parsing. We specify one additional formula per non-linear parameter (a) to clarify what variables are covariates and what are parameters and (b) to specify the predictor term for the parameters. We estimate a group-level effect of accident year (variable \code{AY}) for the ultimate loss \code{ult}. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which, in the case of \code{ult}, contains only a varying intercept for year. Both \code{omega} and \code{theta} are assumed to be constant across observations, so we just fit a population-level intercept. Priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. Otherwise, we may observe that different Markov chains converge to different parameter regions as multiple posterior distributions are equally plausible. Setting prior distributions is a difficult task especially in non-linear models. It requires some experience and knowledge both about the model that is being fitted and about the data at hand. Additionally, there is more to keep in mind to optimize the sampler's performance: Firstly, using non- or weakly informative priors in non-linear models often leads to problems even if the model is generally identified. For instance, if a zero-centered and reasonably wide prior such as \code{normal(0, 10000)} is set on \code{ult}, there is little information about \code{theta} and \code{omega} for samples of \code{ult} close to zero, which may lead to convergence problems. Secondly, Stan works best when parameters are roughly on the same order of magnitude \citep{stan2017}. In the present example, \code{ult} is three orders of magnitude larger than \code{omega}. Still, the sampler seems to work quite well, but this may not be true for other models.
One solution is to rescale parameters before model fitting. For instance, for the present example, one could have downscaled \code{ult} by replacing it with \code{ult * 1000} and correspondingly the \code{normal(5000, 1000)} prior with \code{normal(5, 1)}. In the \code{control} argument we increase \code{adapt\_delta} to get rid of a few divergent transitions (cf. \citeauthor{stan2017}, \citeyear{stan2017}; \citeauthor{brms1}, \citeyear{brms1}). Again the model is summarized via \begin{Sinput} summary(fit_loss1) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: cum ~ ult * (1 - exp(-(dev / theta)^omega)) ult ~ 1 + (1 | AY) omega ~ 1 theta ~ 1 Data: loss (Number of observations: 55) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~AY (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(ult_Intercept) 745.74 231.31 421.05 1306.04 916 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat ult_Intercept 5273.70 292.34 4707.11 5852.28 798 1 omega_Intercept 1.34 0.05 1.24 1.43 2167 1 theta_Intercept 46.07 2.09 42.38 50.57 1896 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 139.93 15.52 113.6 175.33 2358 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} as well as \begin{Sinput} conditional_effects(fit_loss1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_loss1.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model.} \label{me_loss1} \end{figure} (see Figure \ref{me_loss1}). We can also visualize the cumulative insurance loss over time separately for each year. \begin{Sinput} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_year <- conditional_effects(fit_loss1, conditions = conditions, re_formula = NULL, method = "predict") plot(me_year, ncol = 5, points = TRUE) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_loss1_year.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model separately for each accident year.} \label{me_loss1_year} \end{figure} (see Figure \ref{me_loss1_year}). It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. In the above model, we considered \code{omega} and \code{delta} to be constant across years, which may not necessarily be true. We can easily investigate this by fitting varying intercepts for all three non-linear parameters also estimating group-level correlation using the above introduced \code{ID} syntax. \begin{Sinput} nlform2 <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|ID1|AY), omega ~ 1 + (1|ID1|AY), theta ~ 1 + (1|ID1|AY), nl = TRUE) fit_loss2 <- update(fit_loss1, formula = nlform2, control = list(adapt_delta = 0.90)) \end{Sinput} We could have also specified all predictor terms more conveniently within one formula as \begin{Sinput} ult + omega + theta ~ 1 + (1|ID1|AY) \end{Sinput} because the structure of the predictor terms is identical. 
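Before turning to formal model comparison, it can be instructive to look at the estimated group-level structure of the more complex model. The following lines are a small sketch of how this could be done (the exact dimension names follow the naming scheme of \pkg{brms} and may differ slightly across versions): \begin{Sinput}
# group-level standard deviations and correlations of ult, omega, and theta
VarCorr(fit_loss2)
# per-year estimates of the ultimate loss, combining population-level and
# group-level terms (coefficients of non-linear parameters are named
# <nlpar>_Intercept)
coef(fit_loss2)$AY[, , "ult_Intercept"]
\end{Sinput}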
To compare model fit, we perform leave-one-out cross-validation. \begin{Sinput} LOO(fit_loss1, fit_loss2) \end{Sinput} \begin{Sinput} LOOIC SE fit_loss1 715.44 19.24 fit_loss2 720.60 19.85 fit_loss1 - fit_loss2 -5.15 5.34 \end{Sinput} Since smaller values indicate better expected out-of-sample predictions and thus better model fit, the simpler model that only has a varying intercept over parameter \code{ult} is preferred. This may not be overly surprising, given that three varying intercepts as well as three group-level correlations are probably overkill for data containing only 55 observations. Nevertheless, it nicely demonstrates how to apply the \code{ID} syntax in practice. More examples of non-linear models can be found in \code{vignette("brms\_nonlinear")}. \subsection{Example 4: Performance of school children} Suppose that we want to predict the performance of students in the final exams at the end of the year. There are many variables to consider, but one important factor will clearly be school membership. Schools might differ in the ratio of teachers and students, the general quality of teaching, in the cognitive ability of the students they draw, or other factors we are not aware of that induce dependency among students of the same school. Thus, it is advised to apply a multilevel modeling techniques including school membership as a group-level term. Of course, we should account for class membership and other levels of the educational hierarchy as well, but for the purpose of the present example, we will focus on schools only. Usually, accounting for school membership is pretty-straight forward by simply adding a varying intercept to the formula: \code{(1 | school)}. However, a non-negligible number of students might change schools during the year. This would result in a situation where one student is a member of multiple schools and so we need a multi-membership model. Setting up such a model not only requires information on the different schools students attend during the year, but also the amount of time spend at each school. The latter can be used to weight the influence each school has on its students, since more time attending a school will likely result in greater influence. For now, let us assume that students change schools maximally once a year and spend equal time at each school. We will later see how to relax these assumptions. Real educational data are usually relatively large and complex so that we simulate our own data for the purpose of this tutorial paper. We simulate 10 schools and 1000 students, with each school having the same expected number of 100 students. We model 10\% of students as changing schools. \begin{Sinput} data_mm <- sim_multi_mem(nschools = 10, nstudents = 1000, change = 0.1) head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.5 0.5 16.27422 2 10 9 0.5 0.5 18.71387 3 5 3 0.5 0.5 23.65319 4 3 5 0.5 0.5 22.35204 5 5 3 0.5 0.5 16.38019 6 10 6 0.5 0.5 17.63494 \end{Sinput} The code of function \code{sim\_multi\_mem} can be found in the online supplement of the present paper. For reasons of better illustration, students changing schools appear in the first rows of the data. 
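The exact code of \code{sim\_multi\_mem} is part of the online supplement and not repeated here; purely as an illustrative sketch (not the code used for the paper), data with the same structure could be simulated roughly as follows: \begin{Sinput}
sim_data_mm <- function(nschools = 10, nstudents = 1000, change = 0.1) {
  s1 <- sample(nschools, nstudents, replace = TRUE)
  s2 <- s1
  # the first 'change' proportion of students attends a second school
  # (which may occasionally coincide with the first one)
  ch <- seq_len(ceiling(nstudents * change))
  s2[ch] <- sample(nschools, length(ch), replace = TRUE)
  u <- rnorm(nschools, sd = 3)      # school-specific effects
  w1 <- w2 <- rep(0.5, nstudents)   # equal weights by default
  y <- 20 + w1 * u[s1] + w2 * u[s2] + rnorm(nstudents, sd = 3.5)
  data.frame(s1, s2, w1, w2, y)
}
data_mm_sim <- sim_data_mm()
\end{Sinput}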
Data of students being only at a single school looks as follows: \begin{Sinput} data_mm[101:106, ] \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 101 2 2 0.5 0.5 27.247851 102 9 9 0.5 0.5 24.041427 103 4 4 0.5 0.5 12.575001 104 2 2 0.5 0.5 21.203644 105 4 4 0.5 0.5 12.856166 106 4 4 0.5 0.5 9.740174 \end{Sinput} Thus, school variables are identical, but we still have to specify both in order to pass the data appropriately. Incorporating no other predictors into the model for simplicity, a multi-membership model is specified as \begin{Sinput} fit_mm <- brm(y ~ 1 + (1 | mm(s1, s2)), data = data_mm) \end{Sinput} The only new syntax element is that multiple grouping factors (\code{s1} and \code{s2}) are wrapped in \code{mm}. Everything else remains exactly the same. Note that we did not specify the relative weights of schools for each student and thus, by default, equal weights are assumed. \begin{Sinput} summary(fit_mm) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: y ~ 1 + (1 | mm(s1, s2)) Data: data_mm (Number of observations: 1000) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~mms1s2 (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 2.76 0.82 1.69 4.74 682 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 19 0.93 17.06 20.8 610 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 3.58 0.08 3.43 3.75 2117 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} With regard to the assumptions made in the above example, it is unlikely that all children who change schools stay in both schools equally long. To relax this assumption, we have to specify weights. First, we amend the simulated data to contain non-equal weights for students changing schools. For all other students, weighting does of course not matter as they stay in the same school anyway. \begin{Sinput} data_mm[1:100, "w1"] <- runif(100, 0, 1) data_mm[1:100, "w2"] <- 1 - data_mm[1:100, "w1"] head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.3403258 0.65967423 16.27422 2 10 9 0.1771435 0.82285652 18.71387 3 5 3 0.9059811 0.09401892 23.65319 4 3 5 0.4432007 0.55679930 22.35204 5 5 3 0.8052026 0.19479738 16.38019 6 10 6 0.5610243 0.43897567 17.63494 \end{Sinput} Incorporating these weights into the model is straight forward. \begin{Sinput} fit_mm2 <- brm(y ~ 1 + (1 | mm(s1, s2, weights = cbind(w1, w2))), data = data_mm) \end{Sinput} The summary output is similar to the previous, so we do not show it here. The second assumption that students change schools only once a year, may also easily be relaxed by providing more than two grouping factors, say, \code{mm(s1, s2, s3)}. \section{Comparison between packages} Over the years, many R packages have been developed that implement MLMs, each being more or less general in their supported models. In \cite{brms1}, I compared \pkg{brms} with \pkg{lme4} \citep{bates2015}, \pkg{MCMCglmm} \citep{hadfield2010}, \pkg{rstanarm} \citep{rstanarm2017}, and \pkg{rethinking} \citep{mcelreath2017}. Since then, quite a few new features have been added in particular to \pkg{brms} and \pkg{rstanarm}. 
Accordingly, in the present paper, I will update these comparisons, but focus on \pkg{brms}, \pkg{rstanarm}, and \pkg{MCMCglmm} as the possibly most important R packages implementing Bayesian MLMs. While \pkg{brms} and \pkg{rstanarm} are both based on the probabilistic programming language \pkg{Stan}, \pkg{MCMCglmm} implements its own custom MCMC algorithm. Modeling options and other important information of these packages are summarized in Table~\ref{comparison} and will be discussed in detail below. Regarding general model classes, all three packages support the most common types such as linear, count data and certain survival models. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. Additionally, they offer support for zero-inflated and hurdle models to account for access zeros in the data (see Example 1 above). For survival / time-to-event models, \pkg{rstanarm} offers great flexibility via the \code{stan\_jm} function, which allows for complex association structures between time-to-event data and one or more models of longitudinal covariates (for details see \url{https://cran.r-project.org/web/packages/rstanarm/vignettes/jm.html}). Model classes currently specific to \pkg{brms} are robust linear models using Student's t-distribution (family \code{student}) as well as response times models via the exponentially modified Gaussian (family \code{exgaussian}) distribution or the Wiener diffusion model (family \code{wiener}). The latter allows to simultaneously model dichotomous decisions and their corresponding response times (for a detailed example see \url{http://singmann.org/wiener-model-analysis-with-brms-part-i/}). All three packages offer many additional modeling options, with \pkg{brms} currently having the greatest flexibility (see Table~\ref{comparison} for a summary). Moreover, the packages differ in the general framework, in which they are implemented: \pkg{brms} and \pkg{MCMCglmm} each come with a single model fitting function (\code{brm} and \code{MCMCglmm} respectively), through which all of their models can be specified. Further, their framework allows to seamlessly combine most modeling options with each other in the same model. In contrast, the approach of \pkg{rstanarm} is to emulate existing functions of other packages. This has the advantage of an easier transition between classical and Bayesian models, since the syntax used to specify models stays the same. However, it comes with the main disadvantage that many modeling options cannot be used in combination within the same model. Information criteria are available in all three packages. The advantages of WAIC and LOO implemented in \pkg{brms} and \pkg{rstanarm}, are their less restrictive assumptions and that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the packages, \pkg{brms} offers a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, I believe that the way priors are specified in \pkg{brms} is more intuitive as it is directly evident what prior is actually applied. In \pkg{brms}, Bayes factors are available both via Savage-Dickey ratios \citep{wagenmakers2010} and bridge-sampling \citep{bridgesampling2017}, while \pkg{rstanarm} allows for the latter option. 
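As a brief sketch of how this looks in practice (reusing the \code{zinb} data from Example 1), Savage-Dickey ratios are obtained via \code{hypothesis} and require the model to be fitted with \code{sample\_prior = TRUE}, while bridge-sampling Bayes factors via \code{bayes\_factor} require all parameters to be saved when fitting: \begin{Sinput}
# Savage-Dickey density ratio for a point hypothesis; requires prior samples
fit_bf <- brm(count ~ persons + child + camper, data = zinb,
              family = zero_inflated_poisson(), sample_prior = TRUE)
hypothesis(fit_bf, "child = 0")
# bridge-sampling based Bayes factor of two models; both models need to be
# fitted with all parameters saved (save_pars = save_pars(all = TRUE) in
# recent brms versions, save_all_pars = TRUE in older ones), e.g.:
# bayes_factor(model1, model2)
\end{Sinput}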
For a detailed comparison with respect to sampling efficiency, see \cite{brms1}. \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{\textbf{Model classes}} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes & yes$^1$ & yes \\ Response times models & yes & no & no \\ Beta models & yes & yes & no \\ Categorical models & yes & yes$^2$ & yes \\ Multinomial models & no & no & yes \\ Ordinal models & various & cumulative$^2$ & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ \hline \\ [-1.5ex] \parbox{5cm}{\textbf{Modeling options}} & & & \\ [1ex] Variable link functions & various & various & no \\ Multilevel structures & yes & yes & yes \\ Multi-membership & yes & no & yes \\ Multivariate responses & yes & yes$^3$ & yes \\ Non-linear predictors & yes & limited$^4$ & no \\ Distributional regression & yes & no & no \\ Finite mixtures & yes & no & no \\ Splines (additive models) & yes & yes & yes \\ Gaussian Processes & yes & no & no \\ Temporal autocorrelation & yes & yes$^{2, 5}$ & no \\ Spatial autocorrelation & yes & yes$^{2, 5}$ & no \\ Monotonic effects & yes & no & no \\ Category specific effects & yes & no & no \\ Measurement error & yes & no & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Censored data & yes & yes$^1$ & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] \textbf{Bayesian specifics} & & & \\ [1ex] Population-level priors & flexible & flexible & normal \\ Group-level priors & normal & normal & normal \\ Covariance priors & flexible & restricted$^6$ & restricted$^7$ \\ Bayes factors & yes & yes$^8$ & no \\ Parallelization & yes & yes & no \\ \hline \\ [-1.5ex] \textbf{Other} & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & MH, Gibbs$^9$ \\ Information criterion & WAIC, LOO & WAIC, LOO & DIC \\ C++ compiler required & yes & no & no \\ \hline \end{tabular} \caption{ Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{MCMCglmm} packages. Notes: (1) Advanced functionality available via \code{stan\_jm}. (2) No group-level terms and related functionality allowed. (3) Cannot be combined with other modeling options such as splines. (4) Functionality limited to linear Gaussian models and certein pre-specified non-linear functions. (5) Functionality available only on GitHub branches (\url{https://github.com/stan-dev/rstanarm}). (6) For details see \cite{hadfield2010}. (7) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}. (8) Available via the \pkg{bridgesampling} package \citep{bridgesampling2017}. (9) Estimator consists of a combination of both algorithms. } \label{comparison} \end{table} \section{Conclusion} The present paper is meant to introduce R users and developers to the extended \pkg{lme4} formula syntax applied in \pkg{brms}. Only a subset of modeling options were discussed in detail, which ensured the paper was not too broad. For some of the more basic models that \pkg{brms} can fit, see \citet{brms1}. Many more examples can be found in the growing number of vignettes accompanying the package (see \code{vignette(package = "brms")} for an overview). To date, \pkg{brms} is already one of the most flexible R packages when it comes to regression modeling. 
However, for the future, there are quite a few more features that I am planning to implement (see \url{https://github.com/paul-buerkner/brms/issues} for the current list of issues). In addition to smaller, incremental updates, I have two specific features in mind: (1) latent variables estimated via confirmatory factor analysis and (2) missing value imputation. I receive ideas and suggestions from users almost every day -- for which I am always grateful -- and so the list of features that will be implemented in forthcoming versions of \pkg{brms} will continue to grow.

\section*{Acknowledgments}
First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language Stan, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Furthermore, I want to thank Heinz Holling, Donald Williams and Ruben Arslan for valuable comments on earlier versions of the paper. I would also like to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}.

\bibliography{citations_multilevel}

\end{document}
brms/vignettes/brms_monotonic.Rmd0000644000175000017500000002075314111751670017065 0ustar nileshnilesh---
title: "Estimating Monotonic Effects with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Monotonic Effects with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by the following parameterization: One parameter, $b$, takes care of the direction and size of the effect, similar to an ordinary regression parameter. If the monotonic effect is used in a linear model, $b$ can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, $\zeta$, estimates the normalized distances between consecutive predictor categories, which thus defines the shape of the monotonic effect. For a single monotonic predictor, $x$, the linear predictor term of observation $n$ looks as follows:

$$\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i$$

The parameter $b$ can take on any real value, while $\zeta$ is a simplex, which means that it satisfies $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$, with $D$ being the number of elements of $\zeta$. Equivalently, $D$ is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation.
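To see how the pieces fit together, here is a small worked example with made-up numbers: suppose the predictor has four categories, so that $D = 3$, and that $b = 10$ and $\zeta = (0.6, 0.3, 0.1)$. An observation in the lowest category ($x_n = 0$) contributes $0$ to the linear predictor, while an observation in the third category ($x_n = 2$) contributes

$$\eta_n = b D \sum_{i = 1}^{2} \zeta_i = 10 \cdot 3 \cdot (0.6 + 0.3) = 27,$$

and an observation in the highest category ($x_n = 3$) contributes $b D = 30$, so that the average difference between adjacent categories is indeed $b = 10$.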
## A Simple Monotonic Model

A main application of monotonic effects is ordinal predictors, which can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship between yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for their exact income. Instead, they are asked to assign themselves to one of several income classes, say: 'below 20k', 'between 20k and 40k', 'between 40k and 100k' and 'above 100k'. We use some simulated data for illustration purposes.

```{r}
income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100")
income <- factor(sample(income_options, 100, TRUE),
                 levels = income_options, ordered = TRUE)
mean_ls <- c(30, 60, 70, 75)
ls <- mean_ls[income] + rnorm(100, sd = 7)
dat <- data.frame(income, ls)
```

We now proceed with analyzing the data, modeling `income` as a monotonic effect.

```{r, results='hide'}
fit1 <- brm(ls ~ mo(income), data = dat)
```

The summary methods yield

```{r}
summary(fit1)
plot(fit1, variable = "simo", regex = TRUE)
plot(conditional_effects(fit1))
```

The distributions of the simplex parameter of `income`, as shown in the `plot` method, demonstrate that the largest difference (about 70% of the difference between minimum and maximum category) is between the first two categories.

Now, let's compare the monotonic model with two common alternative models. (a) Assume `income` to be continuous:

```{r, results='hide'}
dat$income_num <- as.numeric(dat$income)
fit2 <- brm(ls ~ income_num, data = dat)
```

```{r}
summary(fit2)
```

or (b) assume `income` to be an unordered factor:

```{r, results='hide'}
contrasts(dat$income) <- contr.treatment(4)
fit3 <- brm(ls ~ income, data = dat)
```

```{r}
summary(fit3)
```

We can easily compare the fit of the three models using leave-one-out cross-validation.

```{r}
loo(fit1, fit2, fit3)
```

The monotonic model fits better than the continuous model, which is not surprising given that the relationship between `income` and `ls` is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets.

## Setting Prior Distributions

In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or, more correctly formulated, had the same prior distribution. In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$) and zero otherwise. The Dirichlet prior has a single parameter $\alpha$ of the same length as $\zeta$. The higher $\alpha_i$, the higher the a-priori probability of higher values of $\zeta_i$.
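Before changing the prior, it can help to look up the parameter classes and coefficient names that brms assigns to the simplex parameters, together with their default priors. A minimal sketch (not evaluated here):

```{r, eval=FALSE}
# List all parameters of the intended model along with their default priors;
# the simplex parameter of mo(income) appears with class "simo" and
# coefficient "moincome1".
get_prior(ls ~ mo(income), data = dat)
```

The names shown there are the ones used in the prior specification below.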
Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into higher a-priori values of $\zeta_1$ (the difference between 'below_20' and '20_to_40') and hence into higher values of $\alpha_1$. We choose $\alpha_1 = 2$ and $\alpha_2 = \alpha_3 = 1$, the latter being the default value of $\alpha$ (this implies prior means of $\alpha / \sum_j \alpha_j = (0.5, 0.25, 0.25)$ for the elements of $\zeta$). To fit the model we write:

```{r, results='hide'}
prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1")
fit4 <- brm(ls ~ mo(income), data = dat,
            prior = prior4, sample_prior = TRUE)
```

The `1` at the end of `"moincome1"` may appear strange when first working with monotonic effects. However, it is necessary, as one monotonic term may be associated with multiple simplex parameters if interactions of multiple monotonic variables are included in the model.

```{r}
summary(fit4)
```

We have used `sample_prior = TRUE` to also obtain draws from the prior distribution of `simo_moincome1` so that we can visualize it.

```{r}
plot(fit4, variable = "prior_simo", regex = TRUE, N = 3)
```

As is visible in the plots, `simo_moincome1[1]` was a-priori on average twice as high as `simo_moincome1[2]` and `simo_moincome1[3]` as a result of setting $\alpha_1$ to 2.

## Modeling Interactions of Monotonic Variables

Suppose we have additionally asked participants for their age.

```{r}
dat$age <- rnorm(100, mean = 40, sd = 10)
```

We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the `*` operator:

```{r, results='hide'}
fit5 <- brm(ls ~ mo(income)*age, data = dat)
```

```{r}
summary(fit5)
conditional_effects(fit5, "income:age")
```

## Modeling Monotonic Group-Level Effects

Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. Thus, we add an identifier for `city` to the data and add some city-related variation to `ls`.

```{r}
dat$city <- rep(1:10, each = 10)
var_city <- rnorm(10, sd = 10)
dat$ls <- dat$ls + var_city[dat$city]
```

With the following code, we fit a multilevel model assuming the intercept and the effect of `income` to vary by city:

```{r, results='hide'}
fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat)
```

The model summary

```{r}
summary(fit6)
```

reveals that the effect of `income` varies only little across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed `income` to have the same effect across cities.

## References

Bürkner P. C. & Charpentier, E. (in review). [Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models](https://psyarxiv.com/9qkhj/). *PsyArXiv preprint*.
brms/vignettes/inhaler_plot.pdf0000644000175000017500000071630313202254050016534 0ustar nileshnilesh
.ϼtͥKNN^yu5=¥7k]7C{gܑkO}EuSs_\3wK?ۛKם4G^:o)ɓ:t]͚:ſA:}й߮#_:vZnI}}=KW O>u!NCaM N]۝_ɺhp8ϹtsL]:ҥ{~ӥ?=y鏎r͓ѭoKg^|H\zϼtȇ'/Kw\z:t~ޚg.҇k]zaMɓ:u?!2u_̼Ku/ҧ?ye[}2?'yxy2.ܟɓyåζ:Kt9Iӥ/.ͧYܾ֙>uɸKG\N.C.% ]'k/;}.}+ԥg{߮~g8+'?|&O&9}{piKWn߸.=t']z׵:u楧#'s6O.}/nOܼx}~o7tԏދ+3OFg^NեO>.|p۱KG5y5}{W.}py}yү}{ܧ}N9%C׭[~Xw.}tg^qvy\pɻϼ-v>:sK˹tKoޞzҡfy:t#oQ?ŅK7]koϵyVOsK¥ןuny6}^Y%/zףۥXuӹ{#yntP;G:t'Sťg_ӑדtЩ;'w}.};Nҩ'N\Xy}쎼vq'OtԣKougm=濤co}܇2odI^9H楛.鳋ۥ?>/G}{G8񑷎Kg..pw.Ot=~K7msKϹ^3̧!>E2/zܼt?t_K7_9kv|^K߮Vt].ݹmҳ0l^u5SgN=.??ۭu W?n.oXSNN=֩oWӱr-{t= -NhhoOwK'^^>.楛ַmtߥ[_qK^yo~\yGg38xKﮩGtϷsjtt~u)>]z\_ti=:/tny5}{~4/9JX]/_x.ݹtҥ&_?R}|sHN2?!]:sn鷞g?_yU|7ҩ3/NNyK|3K<.]]sV楷K.}K|XS}\:.']h|#/.K!ݹ:s|<ɼtiGoWV[5^4f\WN}_hݹ}xrM^zѡG~̜~peۥ_:su8r|?\zg=6].Y~҇|fw޳yU9M~K:mrU1/ҥz3/9 VåN0Y/t׏K/:N3\åWvwn?xeMҩo]>.o|ՙS_:t=g^zޞya72\^ve:Ωo_s/ԣxHo#oNp돼atҧ__tם}n'g+#-ȼtϟ냋:|b5eg^#'/9 3G"]s#t]7<>f?xѝS98^u?Im~̍J>tӥC;wBV}l^zus_:+;i~X&o|:mgDx#\S|6k?{^J:r~ӥ_zYCҩӯ6]׾tOt~/ϼt_{=K/;Xo.p{Ύyu^WǜU\z?K_;u7琾_?>O3/=.#/=8.t]]2/NR=upG^ QxSgҩVu腺͚޴֩s;uvw]Yn"\zJ^p]tN3X':u_qҙ~Н[M=ң+g>^ѡ'SXknvҳ/+]:22~ ]&\[Y[XNxv^:?/3.۟㼙.tްo9䥛wh^Zc0s^cotOǨ:}wқy޸tae^:uK>yYw9qMN>{sCc{ӭs|8f~8>tqҗu /߼t}qkҟys\zoK<@nst/qߙ>p~2/=܇<ΏҳΙ.Q|w}Zt;Z':v.EwN]|me<@:9D>?ҙO.G^vݿn@vuݥ3בOۏõKwNyk]z:νqkw_IkoO1l_ttm"^Ow;k?^ݹY7D$]}vy|N^uOI>.}O?;ߺŚ_>}q;}u~ףkyWt>nt|sw楳tїS$wu\:uåg]1]:tηץ׏KyҳK;]u'~.m9wR]s|_]s3O=o/teF!"Y}qak>-\z5?ltq5K)\z~?ƥҭf^zunԹ:t'SS9)QO/ؖn>;qtCL^^yW>.}]u8jK8Bҗy۸ttn^NmvkK?.Kť?:[۫:t_.]'a^3/=9>\vwMy6.ݾ^?.6>E0pOJԥw]/._twt:]z>~3d}ӱ~~~4/ݾp??gKr/m)V]ޞzo3'5pYȼtO;/=9+ӥKz9.>ԩ'.=NxC7O_=Х ~?kwtuüw\:snpoN|åo7.}߿kɚvZdsIz/N?\p;.GC7O.֣μCTft*K^m|\}W~|Wɓ!~}\{ӱS/Ǎ_Q~3o\1/2|9saTn>.=w۷W{=^9Mgyчl^¥g=(]sgt㝗q|yu$ϼKN}N<^t<z;枦Kw.}~HΜ1.gw&/=^͓5/r}Kotgtѥ'{w9t5mksNuԷqWԏ./yѩϼ7]pkoM^wp٧}bιNtoS3/>p{.]1`dx}o֑9ɼ:p>E(Kt)Kt䱯tႋxKw.9o[qnvyKqQM^w9yottS>.>t闷9]N}gGe9c7O^y;/=w7uqc_wKyݚ~mn^Gn.~SXS:qKHS>u;ǥ0Ķi:tV_usםS//:u>:~Mܫg!u5_%ݹ.]wN};lx gܜkZ]_qo:C?nҥʩoKO=Q3\{Y.ā9'׽Õ3'ǧg`M y>c3{5Х;eבː.}¼tߟHscq#ǙN^sir;cGnAt_3]S']z]_sz9y%d:kӵ~+~뮫GsHz{4U7o//]ҧƥ0\ۇkבӭ_ѭS߮S$_sϷŚq"/4[?.].>\S]zKǍK/K|tsFcq|8Yqko!e~N#J~|֩㜋rsMq|Cgjѱ?tvw͚zCiԩ⠛sWKK˼tpL>}(\skǥмt;wӼyd^p{...K'4]zD=ZGK>.|)7:sR3_GzZq<9I;Qn=An}G^|~LJ_S'C~|:v'/}ϭ2/ݹzbt;.}_tq<}>..mKm3O2+5Kסo=蚗A]5/}:_=.|t#}֩=y|K|?S}Cg)?tߏ}hxsWgKgNY3kedM=+lK\rq]Xkq\;7/;O^zF#Pt銛sWùҗ߬.=]9Ux.Z_8җF.{zƜRtp?.+yt9NҭܙoN:.G[gޘ;<>CKt+ۧ¥O#/Z?.=#2C}]N=ϜK]c.96=WͿ5/]KwDzop-.6/[]z /]psR[7pqKsM:ksʼtKk^z_\:·K>ٿSgN*<ҧ>K:Ksy:uRgtgys|O2w!뽙N}{9.ѝu']fM=Ku\ȇN7/9 n'޲fMY<_楓cr䥓yy\:~Kw.yo#/]>zֵtҗspǧ7y鏮ܳS^x?ҧcy:sx yu91yCyp{vKNNY=\zΑ;åGq~ҥWo?~.}K.9$[kWt/r9n^Gwn:NKKo;o}1Iw^Õg~:<~G^:ޙ^>.yK;ѯǥSLy^GnBttK>߾K<KKK/ַɷ:tԷ3~p,’go_7kΜ~[Ϲt~ʼ9α e^zw\s¥"q<͹ҩgK~kqҧk1ļev.==E6ly{??~u]ѹW2- _e/| ?ވX/G?^7_oωk%Hަy؎+}!Aspݟo<^͛'5soUν6*qUscwܑ^u8v}ݺp毷fy8KW]ν^c ޾5*yp:j~'ν.3νQW<[s○so۫_G~>ù78q|Q{cN:7o_5pM]k;oun^bMkM뭘~a|¹Col~sp1νv^9+}:Ź̛^uo/չk]Gzw[۟p=ùTֿ:DsD8cIu{+;o.{ù>soż_k ^u}8%]|8?O¹n\pŹn{8z|{itu_{~so퓿p^k8ƺ^y5cg׹vيgpg   t VW1y}nuߗgyxܿ}sg6<}r_W`Lcq<每KgF5sO_ǹ=]\qޯ8k[}=x>w_`71?ynn]_>\pڹO6}ǹtG}u#k{?n]:7}:=0}?o}n:88}\t܇W/'}sQ<>Ǯ./g8qU%}㧏+Jsw9̟o 8>os_7sSsw>Gqu=sg: q'}>>Ź#}u p >݌73>S8Y_:qqk.a~Ge8Odvm|{|׿p[_8/Ís_׹^ 0o׹NЕƹ[qþi׭'|'c>40"ν_:qo:=O?Gכ>׵yu̓¹;7_0 >u?9_׾eo^ǾV>}!O}F܇yo8c#}s>81܇q_8aNN|Pss30_~ܽ~1}DzraC{qݟ| wU? 
Љ_G{~lV{3uþH #'mùun@>~Ź7o{8w:w8zzoSne}Տh2^eu/ptqCLJs_槑>~߻OnSǑq槧ϟ8aϤNzo鰩}vs{8a3Γ>$O{'_LJsw~mwySouԷq_ԇ;8u7܇y>q~3S7u͛ܽĹdL}9^| >Mw7q4O>}LcS=}գv3KN=Kw'뽗zе_w]/GoI}sC3Ge^ƻNc[6}sup]vקn} u>~܇N-ν1:!ϯ8"}L?NU܇WwکGzCt_yq7}z>&}ygE_>:ù[oùϏ}׹{Kn^#}t~8ΣNx^d{8^+78aԭ?:{X/oP;o}\:pc'1>کoOLxЍܧ또>SĹsGzĹo棒><܇y8>:xS>2Ow͘{/}\p~qsen}S>.Xߞ:xܾGMs_ !=:Lin qv1~֏q_햧CN=ǹ{羮Gs6}ukt{q ƹCsq8989OsnX Csk׽ezq瘿>?ޛ'}n'}}n{7gẄqUo:z͘Sp[o_~eޟOuWܻ~muu.꽮pu7'׹Isq_?l>ϐOl=b8zt|S?ǹw'}u7ܧŹKdN}t9{ܻމs~ŹG|קyq1fc}{ӵ7pO{SҹM;LJs>u'>'}qzp>8?֣?Ozqw?s75q ~ t{__s_oc|ϭo/KO?νAq81.m=zs;ָ8:2q_s_ǻ3v>s_͸8׽emk^>ߟ-o[S~W gøatN>!}q_w'<{S_Y>Yuatǹ'ԏ;oy}>~p^/_G;zy糳ͱ㩗n}??8h5Hws_Gq/ܽs_s_1SǾ}6?:5?9ܭO>G 7ێzuƹAxǹs_b<3ߍ͍4y_]}ܽ1}߭?8?s|s7߇qG{soqٟ'ey{?_wٿ}ޯǹw=8oŹ{xqҽَ?_]>:~ܻ~swsy=;_{mǹ0}r?Mwhg@p8?c ׭d `Y/ѱӿup}ܹŹ.zq}美b|Posrug_:qzue[ޛn=~>p_~N{"z!}ͧN?siԭԷqҩӿ=8ߧs_8~|}sǹ^=ùD]`.=Nҋ1.}q|]\7$䅧OyfSYw߷Ku!J#鳋^>S'_oKߴK7.tYwNЕo_tS}cKg1q##ĥO+.}.i^gമq;ǹ_n >._tuüP]:} v|ttou'.ױХtq:\u"],f҇x1t!r~{ҟ複};3iڥvgܺŘ:vq޸v}{5Ot\KuS~rP~~\z.yA>7Kg}v\wK/>Ϻ/~t]:J;uǥ_~n qNc`W #ڡ_KqͺSnc_{]歏8q}IW>qާեO.]Kt>._ّ.t||K:O]7K|~}vGGa.>ĥz|ۏ\qE2.ݾF]NeݭCqЯl>qu5ۥN:(/7K|klu {.=nws]i:.3i79yq=ťĥ/8~>wwt\=.ک˺us3ϻ+~\K~]M^n`sEAVto/ۥ_9ǥS7ӥkť"ڥ:ɦ3Nu/qy~ώK|\:/NW#/Iu/.}n?uv7y|_.)Kt{Ku:wXx|?}ıqKKw}O]..}uqo~|\yʸ.}|KJg>Et[;up[ҷ+.} Kߨ+7q.(]z!.}u ҷW\>.}_yKӧ:Ia{\z޺ ҥK/BХptݸץ.uҥ[wj#¥w".2pK;ƕutҭ\;?ucsۻu=s\:n_;.}KoKut\Oyqe{\.}KoK\o||]rnOKs]/n>.'/} XN}ɫK+qKӥzooו>XG;?.}q:N]}xt].ЁOv\4En_.?t.>A\]W֥tKӥw}\}p)o]{wȟ?_ ..}ط1ԡYנ]uΟۥ]z.uOt݇K>F]҇U^_v}Yԩϸ֩K$\,cǥߩK7]yX|v8i<7]}q͓ӥ__qss ]wF>ͫ.]K7OJ?\zǥ[>.=7{|C{Kt]>̃ӥ~9>3ukS~\zx\K~gvt]Q:4KK7S毷KХ[? ۥםOw<S[K^:tyqХϓ;..}wlj硳>\w_ҥSԥ7K7.}tq~KOD>|\] .v2a-.>]8\GOS\z;&\z;^]e~u[֍u]JFn.3p+.}:ϝ+]|:Nf|e[uM3ĥХߜ?uďK+/>y{Oҧy{َ:}|N\zãKwץCCLi3ťSѥߏKOI>K".}?O|KoKS?߮C^>|gCC\z{>Kyݸayҙ[֡?^~MǸa~8.ҧڹڟs>֥Smu:ҧyΜuWKһ^Ko'KK\8}=OwMW:K|t^sqzt]zߎK8ץn_.}nu`tҿ>uk׭玻O]:=t}xn:9<:9]u^t'^7J:tAvKWN:.|?;.ҝoyЙOKSﵞKVϧoӥpoե!K?.}vytG/9{KoKO/ץ^:uC\zKґGyGw~NСK.?Ϗ|^tts{_o5t[ƙo>[wԓ1[ѧcy^?]"]z{KѥOױԥ{=KCtץt|tХߜt|ۥuR90O\zKzq׸t?dtҝҭ/e?C?-:kǥ?ԥ{K{Z\9*ҽKבOե}եӟK7oS}Mp]1a7<ߞ:v+߾FK..~|]z;*]:5]]4uwۥkwҼҩ_KuqץCtOYA. 
.}g|ϯKCǝzv.;.}ǥzt\][>;}wحG}S6t].ҹեvu{|\: \:෎m=owv_n?LҝGq>ĥ{F>;{~1.}ӡqθެsqCtۇrp=өקKEtaѥK]*'9Y_å_;/\^1_zX|g]v_p1]ҽ_qYg>?‘O]ͺ t׽ѥ;ԥv陏҇ymݺ]~}:ug֥ҏҭ+KgKuOc$88cN:|~'ۥK暑l_>.Kǭ?!N:urLԭMtKיlS|0ƥ۩lq麆ץKvKu> nR>.wٵK_/$8ͯK/~x,\dp~9skn"yӺuK?u;t[γ>q1y2<:pL݅vN~w9w^:z{|t5K^7I..(.].|v.:{oxxyz>7_nץg}_>:4|KߗwK?#1u o\KǵKש];.tK7_>!x|z}A9}q䝷KP~qNuKg=v~_Kt:]㸨LߦC|}]z;_uwWy8o\:uK}}vN~\:F>o֥?y>ԭ8>[g=j~~^2_=Y'>{\zu·pko/\o?]7K?t9cǷKW>Erå{_]:uU]zK'nX>[K#ҧyίKS\}睗NN\zK7;¥n}>曻ơcKtq;_0나}>q6/}u~oc̯K?9~>t{~\.C^o>.\zA5/x]9}#KUwopͺ^'>u:?y=}_3֥[Gnn_/.<ߛo;tKw?oy2-EOt;}8者i^k/_Wǥ..uuե>K7?Kgqn9ݎ'ng0?R57>@Su):}ץgo:sS3:u{;yK싻ӷ;|oǥ?:i}.}8=>\q_Ky.p:8]G>uq|}l[NƏK?o~|~{~Kq靏N]K]]zpg|.vۼ~t]..}y¥;_Х{\}S~3>uuDt*qGKםҽt\{v䫔>Ko]N_\.uA.}'^:u8rӛy߭SwU89^{_]uC0.쾸1}{_{gwt#.}o.aL߻kt^yd.ut:]:}u[.ZJgчC_gt;/q;hg~K׍[jNPtuqi]s]GNKґ>?.<.]e<.uf^٭wyKc^Jκ͝'q}{MN_C6]9>:t5O=]On^.]WKc^K>t^Ǽp{G~\wԡS%{/:>~cȳ:o^z>楛ϋKrv|t.spz9N>E:/uFpa]y=p K_ԟ+pT?.} ?:d|}}ҥw:y?޼'C]zޛWt]..]WK7X1o:~?_ǣK y驗||:FNw˥g?}K7RN.}.}<]pzZ߮e_awKו]GM:MqwZ.Ga~\.9y-G:&r|;/]Ko?OiK_.>E>O{¥w=MM#1}v|>.O9.}ǥ?Go] һǥcOb^W)>:u]7K9O7y}?ڥ[ХO?]w7_^S曏Kǿ\z׾[:sN[tǬJ~ẫ0Ou]_ҥ?qct|yzzuu^c?._|ͺLy6ף~S>.y:tN';/|.}k^:!K H]zu^t?wCηu;ב;>q)u۹5/z .o߼vKo~y8[:u{uO}}z.߫ץSǥ:;?tmեyCK>xׅuoOƘuR:Ը9tߙϼ.vۥ?=o:9`ҭיt楻N/.{ 2?Kwݍץ?^g>KwݛK'.fcgtѥҝ}֥_oϯKu.t7+\z_^>uvqVy鮫K_].ҡo3]_t/_?\늾.}=owzS_t.}\v8S?:.nQoOsK;6l2v׍</:tO9LJo>g>.xv9~꽻Sn?swo]:sIeY~x|ץ4l8#tqd3tyy;ǥ'O /-]wui:򸢏KwL}'/~v:<]2/ݼJ9t^>tøtrڥK7uWuoO=q+ߌGsww.}:{twOK7_un?v;ޡcǥoo~Zү߼tlnީ.q_uޏץqtݱ.z'/=w]zq'v>o}[>7cXKv酋9ܸuۥ߯c<9ХÎK/].5ҹ.utӿGwQS|twǬkzt]ynu:l߯cgy鷮Щ+.a}ts\u#]?'Kکo:zq黮ޏ>v/iޟ=y#:G|dWǥvʛя_#Kg^8y=o=۝N>>N.}n.ݼhҷ߼9ҽyy|.=ץ?sIe۱;?oCG?:dzN[~\|˱.]:toN~]`Kw]G]g:rNS~u?og~c^qTۥov۷u\Μrf|73\Kutڥw>|sL|q)g?.}0.O;_ĥy]{Ϻ.ϙ ϥ?UqqDG׍Kw_zt>yۭkΩkāY߱]_5n^:K]vuߛ_x~~]zܠyץ?:ulm:uu1{NJKz~W_upӼ8\xn>tu~:4q˿W]ڡ~NK>u<ñy7c-Kl~N^nPХ:v6ƸC~[mХߏ˾yZKGy=·;ԥSҩv^ɓ~qn>vN}w]y͸7Eǥץyp߼~˥ѥ?:z{Ssy}tG.]m|>y:t_pһW.=?.Ϻ>E{c|۾s+ǥ/Kwtyu ;}v9x۷|y9]gK߿.}K..?]?.ϛ׆KA|ҏK/םWC~ץ]z~p|]h>_Ϻo^[>KKo7K>D祻N.|O^zr}],}ǯ=vK7>''\_?:'/G^ÎKi]Kt}w^:z>[ />@q0/^~>c]KO?_SܞO0_w^z·u\Fݥ]: t]Uҝtm>6y雎8.}Ko>k>{Cyr}_os|C7#/oq8ݺA·K?.uIuw^tU1/ǥB~}]zu1y靧ӷǥO_>CN_ܩSg9o!/ߟK??oǥ^ǥ[By_?]+'/tnǸvӾᘺӺA'ǥ;¥+.:.}{sKu^>ͺ<ǥ]YnqKt欷äKե}Y'1Ωĥ[Wեץn*~5/yt=uwts_uM楛Wd^}>tvny>s7b^}+6/ݾ2]u+¥wߋ.޼~/.xKތ}v) Wԥ{K:s\Й|C}sw:tS_C׹/.ؼ2ov.߭c'&}KO盗'/}]:῿3?.]7LK?u̸uҭK7Nn~.qwusԥ}t].ݾKte].?.1<}x3_qһXWKgvu;}vw]kåǥҩlwN]S3cޭs:?:wpӛӱ.d:uRqܬCZo9hN}/og|җS>9?{]:ucqյ?+\ ͷz]z~t_.|d]:yx]:y}vq鬓yɤ׼t?Oq:k\}bnyS>tS_ף]vǝN}AO]<|ps]/\ʏK9{݀|>/?޼;^.?/Ϻt>ywr:N;.[n=C>zK¼vqiѥ{K]6...]Ko.wץ|@ ..>vt.W}ο>~]:YwKץb~u~N;uӟO=ҷ7?uHw/۾SN _e\z;K\zK_KۨKw/_NM֣.wmt]9ҭ^o~qS֥|t鮓ǥb^L~Ziw7'] ҏיS?n>.ԥn.v]zSg]ءS^.ԩ]gv:ڥ:tq?.Qo75:uǝN}GY?1n|t|2^~u\ukK.9it:qe]ͼtɺus=KwKЭooߺs/n2+\SԥwR]1׭ۿw^S.:ut3cn?.]ťM<;;ļt]y?>o>uŸtۥǥ嘗~ۥץ.|c]{]X:]gKס+s?~r>y8z4.]Cw:uǹus]瓗>t|utÿKlǥҩү7]>yҧ߼z\D]vqC;\wmng1u~3~ƥGy8K.}o>}<ǥn}ѝq/:w|]K /uڥ?tڷq8ΘOїz\֣yqەo.߽ά.FKե?]+:\n_qK/йKK7y&|\t;r;?ys{K>zo緟JixKXW|8O:t'ºby?=.cu']0.}ҥpCzvqҥ{KF Kǽ KK;|v7>n/|ɼtK]:>=i~wqx\z߼toַs]:vc\8p/AM{j gW;4#ל%| l/K\{K^4cv2u|oc 맧KձԋVh}yi7v>eFu8؋aez_{q^߮ND]^^պ PDn@4|V'x;MVz^SNTidw߮-ˉK=^.^ڽXbxzlϮ8ոX:8e_mիi4ue&2<׭ OE0}޼^5^%^ZE)^}|zW^zgWůŠQ"tkRyK}^j˥#B0Q~ݢۇ.;/ ;M?AfQ25|^Fů{BVbo>_u_ԑ捉gŰŭ'ym° kc]8t#3]6\{]=kĘ v85*˭,|e&llyREˋАō(`y>bWχQET^{^::|}avPgxriG L]]nwvWKOKđ3Z*WYjC^:^Q]5Ϯ6~U*2enxO??tˍ3ln2jDAuQt&2^!]`\/(^kANu]ZXɥǭt:=/H1K!5Eb;xۯ I~lw筯"x`wS׽; 2}4G%ܓ5U0$=/=v㞇i뽛vs wB]Swk>֔~*UjF`ݽ۵IW+_)#-,˼w]Q^Zۘ2Z4d+Ķ|v^p- .S'Vv!]F4j!R $Ԟ:8i]QeO vmFS|&)/]S@$]ݯ>מ1NZ&6.#WL.|`vQeCF]tQS4k1S=xȞdί^= ƞ6y}S0:`|uȔ}i.VBaMĩVYK1eSv&j` u#ɛZ[7>Oezs٪ƺү]hެÈ{.ž.rы0[œ`Ϯ`65^.2ih y~s\ңP%K\z9g/aλ]Q{auMv0xK2Q> oym>7k WArRcD6φpW>濳ZΙ!Zyw5!͓!rL̵.!y^֓H2`5tq}F c8Z^IϠbK" s:0g#zy ^+yKORd[|$3U [Ώ?e9Y: Y'IM3/\)7GuۄU.Z}TcqVa*(*ÇSt ]7B\ 
@y<.<:/=ߣZ6/iC든m΁0VG^ԁs\XHoJ@r sƗG3\\׋c[D"ש^-"W6j. (BWKZ!L= @./.9r%QRˤ2']UqvukTj{rܵ8\|c5q%ez$dRqjSHck@c82>≮O}QVt|IY%x޼5 ^$|q_҆EWc:Tg| Rukrݘ⮜F*.} *7eJ&kS&FU&'60w0c˥Ck* ^.dH<#0⚦:||Cc=xA\ }PaÇB;z\#x6uup&Pa8<# R.U^phApJb2\sp_x okJ ^}=JTWZ W탣Qh@ OauK>_ 1%KpME[`6>kjw%ImFԁ7pvu8< C&PqcT-s ~_$ˤU h7Z@/̜pg]CK:][#PlGi3x'140E{l6&!ӢSpׂg 1DNK5N DvY`Ac*wG3F/ԯ_[0}/H@W#\&޷\-ܷ6w 滿 fE hS;<%}˅}ݏI=5U {w€|7Ʒ\b+ķoCŵmv^w{U`S|tG``x\wae\To[<)B4'*Z˓ykuovkxkzok" k;.[Jz)^ϓ ޚMc3W-{Rq3X=/rNw‚w^ݵkw4rXX[/;OoIT\9$_ɛfHZnot*pOs0=ގ-CHu-E?@HQ8]M-ߣ 2{6FJnIukxu~x[ܚZu?YJl&ɺx\ `VQ5D)?1V!uȝzkO 1ܚ]m[y!U5E_n>zzFu~­C ߴGr{<} n)H,vvw v?4>gFg(ivtxn0wEj5}.tɒ>[L/\=58fRG_-#yI.wmM)b>٢CM^HStd[dF`l# T\y9{tF^(6akMǧ&xWk_4vYB&ǵ$cKa[ܞw-css7y]vCi˕)S>ZWz1-RQh=_mj3Yw Zohg-gE%Y Y/}&L6%n.ہ))w^q&lv[ױfv޾8Ye_߂>׿hY{lvu][HHR>}SN;o?L9Q"ZW%NA,rYQ/R&=Xcc}X:t~b'hXͭ9ǭ)HfEbn@>c \Zݭ|?}g [B uq2 aE Vb-yok.Nms}%f?Һ+IevVWxW{Jju4/6^bWS:]80O?ts՜.*WUz)5\W-3Gxu^Yb2ëZozZʭ1j7y~RwzՒsHe"ӝ!}ۃ.'ehdW8ǫ^+RU1rT9xԫBPU}xƫx7jt=n= O{K'jxMxUkzU^4|6_WE _`,@Ma(R=Ͻ>ߴ<9}9Ukꜯ,ëF^~ӒuūݎW5*5«W5G{۔auDūk.gz)ë?Uw5m>fxzBϮS)StOקz~SUK{L >xK{r-)Rsռꖯë|S4BU-Uk@W / }xUk]^u-W}?SƫNx';zSG23z%l?xF,HpX0r2ǫjpfUWM?U T\մ<]xtyIb^xU~jC>iC:iԫ TWXWSoh{9?x ƫëƫ6^)~7y#^uӍ2fpaFTg >^2xUϓxU)Onj.Kz/W_$)!S{ܚK]n׫vU!P:xCT{@_WF$[:UKW}iǫ~cO{Ua֫jUU)'^:%LM/at y=ujxU}WW~[Wu}Pƫ5u3MU'e.H:˫NWWU+īNKWU&ѫ0V;ūJxxC7xZU=Qī<]xUޅ]|op73v*{S:rxWuīNW`nځϧūjnUi:z>}U]UI:aZpҾǫsU5xv?WES~uA}@5j]^aiĺc_x{W7#^u>Ky=OZNvU9Uߋ'/v;<3%OǥW7^'(ī 6:^UbW5Qj)^ū*Dq bxՍwڪtyս~47ONqk*{W/id^u*RJUY^Wj ^Uī ī:«v1F{S5$twyUXǫu#`JU]SKu^u/⓯OC_7v ~xU:^U W1ۺU4|UMë^5Eӂ+ 9>ݍj1Oav5Y!׃x:ց^] aU"ƫMW58 wz[SۜPХ^5L&2tVM W)r*U]xUsd9ur&xUra]{[ī>>U5\xU1 ^uezU۫rS,΢eAzUaګSq%|v]Og1L)neǫzf^5xUzՓ^j I2uUݴWQƫJBګnx+_}<*Nj[Loxݕ7Uwn{XHnMAwSDѦ 9|Nƫ6W5=^u+W50^Xs2,zxUsWVƫK}C_ƫeUW)HB]ij$ڠzوW/;^UzZ@'8A"5u`hU5G UU7iЫ]W~H >^{UU;L~rndh~pU1tOz|FWݧZ;L|j #ݨ^SqԬG"xUV/2%jZq+᳿ B+xUDC;)b?'G-ǫ·*^u:Wua*~ί^yUUW~ī';׎kfljI{wvn-QxUτa$^tOn}Ҷ$Pk\UmU;yሞūW }}SK!UxUS>!^^YP7Ut)5UsՆW:Z|#3F2iYB bX$*PQEhu`vSq%P7^u2=ߦ {UѾ$s=Wum jƫz1:WRqu}geW5Q*ǫ3ҫ*ǫ^FU^$)H~|%zUU]'xy52t~ ("47*?OxAͲ~E{W-U̻WxUOAUS*-*`RWS=Y!GB 7^3j׾U][IPڃUyUUëگW}Ly=y|5勃W-#U;{׫N嫡}UjīW4Kaī>燒2)~ULx/AkG򪍵tՍ-Dqy2Ѣ-ɣ},YīzZ7UIU?W/^Y_j/ZEcjמxծUY9WdlnūfxXqfYM& y@_iīv *,q빊U7|+RjUSYlTٚtxfzfϮgʻxTëzrë)pc@V7WTgLA^xꞡ|5=\մ>UUg1zUHĩƫW53^{ʄW}񪷸5^uxUϢzՍ@5jz9pZZxbzT!^u>'[A*Gu. \!a*OisT.Wm [wi*D6*zUqM꼌#~"5G:0Y= E{*z[9ƫW=EzUĢ^5Z ZjIm^UW+Z,WmY YIUdؿ]=.68fWPН;ʿ=~3d_ΙN o ^u^דWb_WO/+w͚\W=GE?76Hy7u[S}ܚ2)~!' 
W ޚ4Bm*x#We&jMCgǷ īzW}$t:y[_^I*kUYDZ2aiƦhNLǫ >ꔯT\ǫzW5:EK M99*r$L5+^5:|E7ifWfMkoMŵaWw9o#^;SxU;aW9U5=mNUī%oWm9%_ו}*Pty(#ӫJJ:jv5NJdQfDY 5v׫tWeR_zUQ<^2@u^'xPq,2Wh/E֓:pk(Z^xUxUVѫv( 聗]@ԋk~.MjZ^UWW}7,ƫ^zETjx~n[8=WEwn4]@xUoUkFk^zUJ7^uWNw%neW/_wl!x iDUO~DaU}y*pzAs>^u*4OZNGīL^JVOjn*Xūzl$ӯW_͚]uj:P5XkGg{o*Obmqiƫʔf;UT_[Ͻ ӫTW=_fM#V^xnUxU&ūZUO_YFW} 9" p5k"16pixUoU훯ZntOMxSʆW"Sj _/,^RN֞"u2xy«<#U5kZ0񪔸7Y0Ttk.7q++b5eNPMOd$qzj͝M8r+rYt[Hnm6Ÿ 0 2ƍʶ]U{AYuOޛ"o" #'TU6Z[,ͫg4,y4k]W-vVXP=[F«Ejp\WUWM@Ի<])= Z̍^tdzyX%t3ـLG_gxU?W l9 )=07a/nʱXʸ<6k}"C!pRͬ+Xꃑ@sWL:@j$WԅUCAD,oQaMWnK**(NMA_|rQFټ eU]qԼ%Uq8=Pɞʆ[E{NcU;Y: ^\p5 s2U`2WKQTíj0)^"$U9NT5s*ြVW|yՋ$xUW-D̫9y~Λ@`j77:mӕUg^j6x"L=W dֆyX @ȏΠzWs3%ΫZ(xUX iVumWE!xU z+ms[uT+}Uje}RYd:L*O^yUZcY uWMv*{-w"uAAGWMūaP4xՎ) `)O;jUo/^55zZ }E5Ql0Uu6,1=UGU7^5]f!3R0J 韐yU+n *U9s&jjY+yJՀjY̫:k^)NIW\3aW  _Qz቙RWMdLHsϥzT-.:LVb@WU/R/a }M:«fK6zUMcU7SG )v[v$ = TjD0WuWj 2«W6O!m)^@j78i}l}U#:*DQ?yc HB,* j`Une^tWuW-d¤ )^*hfNuTmyP#@ @UJp른zC_0 @_9 m}UIU&Z桯|:ɀ4ڷ)16V^K_U!^xq #`}Ut7U ͨydSkejKeӬt0&9"{*^ΩxU@xUk«zjoxa3߅Iūt }UW6rK_ oGx]U"T%"Z jq"^55jXXL": ֝W^ߞīЄu@=ن(*R'Tw0Mat˞ ^n+ȴHUWUͭW%o^5 `«h3UE{2[$^5U WT@IbU5COj(&vLuTk^5Xq*TW 1֌R0zH#*EͫDWmF2g@ yH5*h ^՘jy@ʷTUS77j}[[1L V!p9VU~㨒iV-ljyPj^ii[Mp[tY^!ſWSU{WԀW%^oͫfÏ'=={+%ټ**k Ȼ('dK (^u@XVoEW][ڍZc㫪M*5*96"n~ ^6oU@2U.xՐP?l -vc8U*m^+UCqT49=ǦE+^Dx4ū8WZ|H#*GW&ڬZ| 3E;J 1뫎%*Vò'u ΫZ^ZRT-l6uTP Ӏj[U"$J܄W%o}U3xVl^5Qȼƺ*xˉo5W%ļzx=Wx1/2oA_/b.¤Ϋօn'U)e}3yը۠OE0ëIX*bU= 6s8a|Uq`(U]^i(v3JL&RsUC (z.7^5 ^BZW9!8xվTŁͼ2`ͫV cZD0̫3 ?fgl+xUPoUșW ^5źs}; Z íJLՊ^՚,ͫE-Pxc\lpUɓ1hssT->g% jfJ3kvs%O 2Լ*R4sD,U)þxռ׉W-]IΠzkTkǬSx3|H2zt ^4 z`6b-]AU~.sЃ2VW Si"\5K_5aU˼*WyUx[Xi`Tyb0Q婙H;Lێ&t W)}UW- wy]#Us^ÛAT~yUXūFj]^gxU OW-|4jgxbUjw~ڌ'7UHLpzΫLp΍WuNyՓ*^5 k<5ūaљ u[A_gWdZ<%"^vb7^5Ϋ:/ ^ZWՇ`xUU[d Uj}U4R3+d'Cū@_58x-<誐=uA=WYTeUu3K_&@EigrRݦPr*&T)O5L%bEw(:*Jꫜ@.tuǫWu}{xk!G_ˡ:Y-u^U-+^XNW=U0J-)tNKV ƶP?PUa|5OTK_J@s.k^E8XĔTAjxU «:>U5W6qNy2z j^oſwT)6X1%zǫz z-]j@^SRȋ-glW*x*eV*ͫWՔ ^UO eEQ x CGRdfJxU6~RBAUq`XיVW ^Ռx|ͫWmVnNy\j_IM[ ^XڬHfN& m𪮼'^"«VF;U@H*̫j5YP-K_j]jqh^2c{A& H Z,{z4PXFTKDxUyW܂W=ܖb&RbW }t[깘c[!ë^5NlS8'JJU6<(׽U'j>LwP'٤'k6zͭjWrƫW8WUkz5xU3 ނf"Hf4Y¼flj'UC25m UѢj1 jV*V]@2+i8+~& =O^5C_B%RQmh3R@WkKWmQ'1U2*@m^zW|\<$UW-wj|. ^4-UhjUΫ{}U(«I6i34kK^28HL&RUlPhF)B_yUJ]-^Qlyd'=]^5O]!1Ux,Z+!RTjVSƫb 2P4kIi8Ws1\jbz諒 o^l * K_Uj7T$ͫvq5b*UO WtI@㫷«k^vI,Ie|\?* yXռp0iuږ]HQ楠-xUsZVj'  a@UpxEW ּ*m g6UU +٤h^W+j9^{^5 mUMf a@UW~~Tƫ|ƫLur&؟t| /(S\?O?SŇC?'^c&(ܟSŇEs׋")C~z/ԗc~OAA)? 
Kɯu~Lz_??:O}/}-Lz?wWqGx|>$s^/uZ'w/>''ttzЏ$|^Ix:}+rKV,ef7o|s7?9 7^̔wojOW;'vT49g]#P؎IIvkR_b;fl'6v-c;'~9w<~LwfNv3M,)Mߟ{%YU#_j,04DX_m63mlMØxk>=1 v4)v4 IxY!i>2/VG&jXW} gE!59֮<vxx:#| a3"FjضWǬArljHq#GHܛG#KTl[pǫD"e_I?#@g=(< <_cTگaHUkoE*ݯp&Zx;˿;wԾ-.oʷ"}dVj{ABQu=L><&y-:.J=qe}ճxhx6h遁vp1P5ʱG:zTK)Tm4qQ@{h}\iUGO}p5#鞃z<REJ53V[T}nXc/1Suq]s18tcL%!cdi(7"5|Lu]{fw@3nvY?fWGk @n/N!{||ww?Ξ\iצ"%{_t7U[I.^ڦrǻr?4;m%=gw@v'3㶻:^u7\z]o>cwmY9c$?#ӮcjN]/);ȋZ\/,6endstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 612 828] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj xref 0 11 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000233322 00000 n 0000233405 00000 n 0000233517 00000 n 0000233550 00000 n 0000000212 00000 n 0000000292 00000 n 0000236245 00000 n 0000236339 00000 n trailer << /Size 11 /Info 1 0 R /Root 2 0 R >> startxref 236438 %%EOF brms/vignettes/me_rent2.pdf0000644000175000017500000005450313252451326015577 0ustar nileshnilesh%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170908165032) /ModDate (D:20170908165032) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 18771 /Filter /FlateDecode >> stream xMnQWJTv嫗OM_q~jؾr2q5x7 k m5q[պ}nO.W%~_62'/KҴ|-o2MGeWZ)j߶=vkE{?3Zj[\_uR5~_Cq_5bʕb_&;Ugd⭆?oq__?N|+;}gk3q~_ `=e|b2.;V-UHLg;ɓMk ɾwĽfg+.bXQ/Yek+}K{-k,[u}|/lw pq;lva=2"ުl"܋fm]:{C{g$/9Vw~;BqozӢq%W2%q[lOX.*v־pwae_վުRZcope|ִ4q_n""IϣL'= X֬z|Pz.w~}t\hV&&~ՂwbkF{9`e7틤L{쁔߲oȼxK n/}r2ߢgǽĭ1qh+}P[h<$މx[4}(W]~NJѺ8T^ NBݚ v/}!Caen;1tb]MIa)vb\y(1,D:~MbYuj_^kjLLIbƃ24 ˣP\-&0mڴ/@-x,2v2ia%2e|V͆vSqYQ#ӰD+=5-_ l#GgEmq_[[Qtٷ8eCkKxL܆)v_mI[hR 4Ta\9m}_bFB_~eOfkb5WaA#CzѮC;v4bOpv.,hL'^/a@#;ct3녥~lәa蓭tm`˶dX:6mO7E>Ƕt[4+)୆m0|9l>FV ;:W8 d+"84fpL“=-"Xe+,} + VJWr_JUւ2Vi2>y #I\1LiҎ$A,M"2#fDZ}QvB?ѤO[Z,a4MvnFtZOLjRnyb3[!TivԌDl АtrLАs!xp)_а ?AЖs96d_hA͐s3o8}֥qjo{˵Wah|\CA{M` KkJsPWB dxZ|fx՛\0xM6^r{+S|7`/ ^ւwcmZ^lqs+v<4^׸;[~ 41E;dnq;ʹ[eXL b8 \!nXG[*%L~IPJ6hE4R̜Mbs)8T72ŕlҜ~oᰩV6uMztoFEQ`VI# A2ѩ_0ġmߵcDjt_}al^Ƅ̆}o}\ؑ`P2fuZ 3 " c:$P$a1$hv0$eUI 3 X14ǓNL qi, Z1> 3Rn2 3DjM0MlGc6A).Kl6lLLc6&. 
F̌NP&?NeSj.-wShۇ;2uRro;52C)ۗ o0v[܋a j쇼0vݎ8o%D*~V:n`ؾ4l!.Iv9j2l Pl%&B>R@ٺ>:u@Ú<.?,i-=g㲅4/$s٩l#lg\e2 H+A[>:8GPEVqɨ00R@+d+]O|?V+2Xh؂g' :,eE^ u<^Ejl1Y'.f{{me:GU_o Lۀi2Y.&C,2=e+\:jKTXer?oc]&YMu%^ .[~`*6'r][^X{;űy^:vAp;lRJ leb&[w(b@z -VL@BZ7[>Gz/~[c,ru`cx HaF0ɪ_@_vp@!B[D#?P@}g?ZAb R?`˰yS*A$ÿ8bb&)ʤ DB,&"3ay8J [L- 2Zx<(xGݬrxro'eP15_k2QXǢcڢFL0/BGd Yd ێA$7,m؎Hdಠh[2N/DZxA'y"+3p;Ar;D',--kkز m[ȶ.CB2$2{~$ +O'Va¢Yn2+O^V$U$QW+ǟ Yr-yN"}.b⑘⒐\2.R^".$&h!B*J"G3 -ϛ aMXԤ׃HD[D@*"`^F8͎G/"2пLJ 0*A11fC"qx:}o FH()$!F }.(2GH< ijHEm^ޙV;4 & И'\JB%+d`rܫ niãcGaNY6`cK!kt{+,b ]b玸=ii=xh3h"/ iDw l@=KWpV~&Π MQA=0`PŸY=$aY#@b)F`e7Ib"-2u]@|o) ҂Dq\P뀅4+HDt,Gv-8k_/8 gqPƞec;˸~rK1>d ;[^ G{$ܘ14͖+e7%Ԁ1nU'ܔBcL)Ɠ0Sbt)sXnۄ"R&l/LQ@0St'HaDj3` fqvˀ<Œ7%P/C?e/Bxr^+׍-`2~a /M|B2~/Oc!`E>a.oK}}/[7TeVlYA^*Cwish(;";բ3WFVjg>`R Xi -VJ, ,xn?aelL։ʖFXn'tdɤē鉡“&I5dzNd+MhCK>uʖ,[%s}Vk7 ʖ Xn+u`ŖjX1t.t.BK..LzV+n) S#dL7B9<ŶSu\WW2=o>dcΫ A0/tY?.rX TW(BvT[n+Dc'gbE @-W=7<QmÜDLϸ CrnY E $<\:K"Sy;!qHNE $- !*wMQ,A :! :! SIH8.2HH ?. !BZ<Β,c!UƱED5)zeoƏ4nҶ jE&!UIHfQ@@ &D":2ID&_Y|!ʍ""񀈪rXIDqQU =A"26QͯJ*Ђ0Y?T.{MD$7~MD5VsPeEH=HBE $5Q-"U[HD8=cnzEB]d2 *w(1$Bj{P5#$΀(3$G)þoLX5\ADK0:6J"D9$FDR}S :ʝoz;6 YGïhxMeZ@ii8T3gxBhӒN9[d뇝EKEKaΊ;yd[`'יN'jܱeX6ozq:2)o vr v '3G$:1=I'( v/vr޴"*Rd;% +ЂPd vj}N`C9$e9IMd|SwEo@L;~@ +( P@gQg%d͗pQ HK-Xs 7[W S\Dd¤5Y&:DId<诌XL`"o7h݌F+-`.1̼.j=b$1\O3hJRx&b 1tdJP5f 2#>ьinULHAd4%#jU6B6LVb&/LIL+%F#ɋ"&S҇l+E& шEzNٸ`JwZUTu$F#LFb4 B&єԵ垮/fdB1jl)fBs2[d!/MgUe|1\ۓt̯ǝG̦1Bbr22$fSZ̦|11[%n_/:sf `n fb6[-)fkm'HYn]gղ*idd1[+_bVmy(,^VJP ٮU[dTb2S"ٲ{{3fb֣>Й{I]L KۄJT lt i KFs+yBu XiɮdLG?n(V\)[ cڥPi"KwHVA NWp'dDaEQmPdWnƚHkȬh;vF,%ZYsS\RdĆHJyp2j#EofsP'hg Pẁ =jl1PgE;FB^f($z!rQy_cCfM5zO&]= `  Ms726^GбWwgp` Ltj'uyL6jzab`a"3cz*aݯ0;>7oƾg.*4z,ёu>poi>{Fb`M"gxc@Y唴ă#-C@#b hăv0Lhz|)w?%^!fB#HcL1 L?oh|P/h]u0}Wh&vk@#[ N!f hl4Ns[u"Uu AT\_uCx\4? 4(9И<}TdD9J3% Gq QT5_O;-3̓ʀᓐCțCJ~Ot?'?O:?%9ֽm'Ο{K5woAe&?GtHdP Ah4i?Ń]:vmߕÊ^tly+v9tl`bw+:[սJ)VhNVN:t?צH(tM'=|Gb;*M}8ïg}`EŒA6.Xi)XӃol],Vt"5\Wו8ĊmsrIۤ+JY)q>hE!VLNRV(Ǫ$+B3={IA]WtljATo\$段@G_l-JeXcʁCR茪/Vv }U"SU߬ά5 yvgvfELU7߉m̥+ M~GÅ xlp X@YU(lݲLj;5YDT;C!Nusw*$*V~lV׻qȝ;$;;ΐɝ!;zqʉŝQLT+qgt%w2'SOܩ#$w&ogט qʣŝ*gwq+9SęL}g7-SĝC ɝJw58S~Tq9 .%ws;׻W`Δ8UDh]r;k\(#ř#iˑ3GVxnr֡zcGg3: S0DL ͛tAԼf0!ˆA:0ٕ&t,!i.D :1+QL}r :'{ uń & :E, k1aNbB2(cw751!s mx2񹻘`5:'VHw9h>*$]*$,h]0!D$ D!aULdbCl!aG+qE A@1VaBT˛HS&Df>EP9sqČ6'BB&p[WɈ_1g¦acj~6 hS/}%Gvercp㍭ב{Wǚ8qd}v$;Z&&umHe#U>_&ג#a]36C\]H!x?pdٌ"ȡNQ54֒ lA|qt.n$Gv6yGFCrd'7#kI ա׋#Un/켿Hq8kN*92NNGj# + qő',lB#Ȯy]<4D&He#G+*WT8XOF5u'ϕB3d^iA|n"g,ऻ+w0%;;E;u1{IF_gwJA|H׽5CһP|BI:ǃC߃~?Tg%*2y>o}($yCɫNQ`M)`|]lyYLӣ?OJz0>ca#%9u_Ro۪5/KȎ+*Շuz>|3ks*OȎ-ZdvOȈB,AU$Z;R=ّMW}yԜwE,ٱ?P {ر5ϓu.9XbŪ"48lGVrbǘ2_(!V\jw,vL2w؍8;=Ǹ؍8%‚Ȏ#|dG'=^bA;ʖ5=#E/XrMɎ>!7:aɩv䔏8"`ǙE3uNweT8UQ ';LV#;NeB2ɎdǩdG88ɎS#ɎS ɎSdǩd)"q&eN'qj^q5q˜ vj8Mv4S>dLL)2`C1~?qÊ7ؓ8$+N<ɊSMȊՆ|녕?`ǙvY<`Ǚ58aljr`YĎ0qSȊ=;ĊzΊZ idE/FΩ i^)+N꧆ͤ6S3jDyͽgu7 +xj[xvpFSʙqV .u`E%I`^g'b:c5qYdpdDp,;FFU׬;IFQ=&8؃Z`62ű0DmjG/F#N Ytdbĥ`Dk&cmAuGf1h6 B0ÿVg^kY3dƮDdos(gŌ#fSYsbơ,F2GC&31'3*/fdC0cTsP G 9>ĐEIFE?&bHgCvťɐ׊!^ŐG ĄdȦ,3noi2dʦEdŔMqk2%;S1cʟ*d˔``'ƔW-ɔ]Ydʮ3Y]4dJNd ߘrhh )Xcʲ d3f:XZcNm8v}ɘ|_-1W *oAJK֡CAz`LfRay +W0|'$h;9( #2}t8_M%e= F02uAf~Xi`0iX2S%j^z:|hNtجpd""=.epk Z?C4|yr,kL@y ɲQ0cȁpd,vA.88R4g.ɲqg #SȲ̉*_ȒExYvk-4 JvYVFp( WP#WIQk(þm$CͶM~ ˡ٧σH^_R4"=yT?Ez"hAz"KEڞ ى45;Wdg;!U.#YΤqh-dGҴa Ecɑ4gQ$d95N/MGN^9E2"M޼ K7d5!22AK~#ɯ (2\fQ$iRKF^/o$NF K "o$Hs@4-+42i;7i #Hs1H3I(R\D\Eu ϕ8K-+IʬiBs=I+AK$ϥ+'\! 
brms/vignettes/brms_phylogenetics.Rmd0000644000175000017500000003004014010776135017724 0ustar nileshnilesh
---
title: "Estimating Phylogenetic Multilevel Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Phylogenetic Multilevel Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

In the present vignette, we want to discuss how to specify phylogenetic
multilevel models using **brms**. These models are relevant in evolutionary
biology when data of many species are analyzed at the same time. The usual
approach would be to model species as a grouping factor in a multilevel model
and estimate varying intercepts (and possibly also varying slopes) over
species. However, species are not independent as they come from the same
phylogenetic tree and we thus have to adjust our model to incorporate this
dependency. The examples discussed here are from chapter 11 of the book
*Modern Phylogenetic Comparative Methods and the application in Evolutionary
Biology* (de Villemeruil & Nakagawa, 2014). The necessary data can be
downloaded from the corresponding website (http://www.mpcm-evolution.com/).
Some of these models may take a few minutes to fit.

## A Simple Phylogenetic Model

Assume we have measurements of a phenotype, `phen` (say the body size), and a
`cofactor` variable (say the temperature of the environment). We prepare the
data using the following code.

```{r}
phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex")
data_simple <- read.table(
  "https://paul-buerkner.github.io/data/data_simple.txt",
  header = TRUE
)
head(data_simple)
```

The `phylo` object contains information on the relationship between species.
Using this information, we can construct a covariance matrix of species
(Hadfield & Nakagawa, 2010).

```{r}
A <- ape::vcv.phylo(phylo)
```

Now we are ready to fit our first phylogenetic multilevel model:

```{r, results='hide'}
model_simple <- brm(
  phen ~ cofactor + (1|gr(phylo, cov = A)),
  data = data_simple,
  family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "b"),
    prior(normal(0, 50), "Intercept"),
    prior(student_t(3, 0, 20), "sd"),
    prior(student_t(3, 0, 20), "sigma")
  )
)
```

With the exception of `(1|gr(phylo, cov = A))` instead of `(1|phylo)`, this is
a basic multilevel model with a varying intercept over species (`phylo` is an
indicator of species in this data set). However, by using `cov = A` in the
`gr` function, we make sure that species are correlated as specified by the
covariance matrix `A`. We pass `A` itself via the `data2` argument, which can
be used for any kind of data that does not fit into the regular structure of
the `data` argument. Setting priors is not required for achieving good
convergence for this model, but it improves sampling speed a bit. After
fitting, the results can be investigated in detail.

```{r}
summary(model_simple)
plot(model_simple, N = 2, ask = FALSE)
plot(conditional_effects(model_simple), points = TRUE)
```

The so-called phylogenetic signal (often symbolized by $\lambda$) can be
computed with the `hypothesis` method and is roughly $\lambda = 0.7$ for this
example.

```{r}
hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0"
(hyp <- hypothesis(model_simple, hyp, class = NULL))
plot(hyp)
```

Note that the phylogenetic signal is just a synonym of the intra-class
correlation (ICC) used in the context of phylogenetic analysis.

## A Phylogenetic Model with Repeated Measurements

Often, we have multiple observations per species, which allows us to fit more
complicated phylogenetic models.
```{r} data_repeat <- read.table( "https://paul-buerkner.github.io/data/data_repeat.txt", header = TRUE ) data_repeat$spec_mean_cf <- with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo]) head(data_repeat) ``` The variable `spec_mean_cf` just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows: ```{r, results='hide'} model_repeat1 <- brm( phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), data = data_repeat, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0,10), "b"), prior(normal(0,50), "Intercept"), prior(student_t(3,0,20), "sd"), prior(student_t(3,0,20), "sigma") ), sample_prior = TRUE, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The variables `phylo` and `species` are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for `phylo` and thus the `species` variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again we can obtain model summaries as well as estimates of the phylogenetic signal. ```{r} summary(model_repeat1) ``` ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat1, hyp, class = NULL)) plot(hyp) ``` So far, we have completely ignored the variability of the cofactor within species. To incorporate this into the model, we define ```{r} data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf ``` and then fit it again using `within_spec_cf` as an additional predictor. ```{r, results='hide'} model_repeat2 <- update( model_repeat1, formula = ~ . + within_spec_cf, newdata = data_repeat, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of `cofactor`. ```{r} summary(model_repeat2) ``` Also, the phylogenetic signal remains more or less the same. ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat2, hyp, class = NULL)) ``` ## A Phylogenetic Meta-Analysis Let's say we have Fisher's z-transformed correlation coefficients $Zr$ per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success): ```{r} data_fisher <- read.table( "https://paul-buerkner.github.io/data/data_effect.txt", header = TRUE ) data_fisher$obs <- 1:nrow(data_fisher) head(data_fisher) ``` We assume the sampling variance to be known and as $V(Zr) = \frac{1}{N - 3}$ for Fisher's values, where $N$ is the sample size per species. Incorporating the known sampling variance into the model is straight forward. One has to keep in mind though, that **brms** requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of `obs` represents the residual variance, which we have to model explicitly in a meta-analytic model. 
```{r, results='hide'} model_fisher <- brm( Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), data = data_fisher, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "Intercept"), prior(student_t(3, 0, 10), "sd") ), control = list(adapt_delta = 0.95), chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` A summary of the fitted model is obtained via ```{r} summary(model_fisher) plot(model_fisher) ``` The meta-analytic mean (i.e., the model intercept) is $0.16$ with a credible interval of $[0.08, 0.25]$. Thus the mean correlation across species is positive according to the model. ## A phylogenetic count-data model Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example. ```{r} data_pois <- read.table( "https://paul-buerkner.github.io/data/data_pois.txt", header = TRUE ) data_pois$obs <- 1:nrow(data_pois) head(data_pois) ``` As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of `obs` (e.g., see Lawless, 1987). ```{r, results='hide'} model_pois <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), data = data_pois, family = poisson("log"), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` Again, we obtain a summary of the fitted model via ```{r} summary(model_pois) plot(conditional_effects(model_pois), points = TRUE) ``` Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead. ```{r, results='hide'} model_normal <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)), data = data_pois, family = gaussian(), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(model_normal) ``` We see that `cofactor` has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks. ```{r} pp_check(model_pois) pp_check(model_normal) ``` Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal models fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit. ```{r} loo(model_pois, model_normal) ``` Since smaller values of loo indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. For instance, you could use a negative binomial model (via family `negative_binomial`), which already contains an overdispersion parameter so that modeling a varying intercept of `obs` becomes obsolete. ## Phylogenetic models with multiple group-level effects In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. 
In **brms**, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. This will be very slow especially when the grouping factors have many levels and matrices are thus large. ## References de Villemeruil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: *Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice* (ed. Garamszegi L.) Springer, New York. pp. 287-303. Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. *Journal of Evolutionary Biology*. 23. 494-508. Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. *Canadian Journal of Statistics*, 15(3), 209-225. brms/vignettes/brms_threading.Rmd0000644000175000017500000005721714105230573017027 0ustar nileshnilesh--- title: "Running brms models with within-chain parallelization" author: "Sebastian Weber & Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Running brms models with within-chain parallelization} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ``` ```{r, fake-data-sim, include=FALSE, eval=TRUE} set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, N),] # drop not needed row names rownames(fake) <- NULL ``` ```{r, model-poisson, include=FALSE} model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4) ) ``` ```{r, benchmark, include=FALSE} # Benchmarks given model with cross-product of tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and inits is set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. 
benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, inits = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, inits=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, inits = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) lapply(sims, abind::asub, idx=draw, dim=1, drop=FALSE) ``` ## Introduction Full Bayesian inference is a computationally very demanding task and often we wish to run our models faster in shorter walltime. With modern computers we nowadays have multiple processors available on a given machine such that the use of running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward by merely launching multiple chains at the same time, the use of within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with **brms**, since its efficient use depends on various aspects specific to the users model. 
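How much can actually be gained also depends on how many physical CPU cores
the machine provides. As a quick first check before configuring chains and
threads, one can query the hardware with the `parallel` package that ships
with base R (a small sketch; the same calls are used in the benchmarks later
in this vignette):

```{r, eval=FALSE}
# number of physical cores: a sensible upper bound for chains x threads per chain
parallel::detectCores(logical = FALSE)
# logical cores include hyper-threading, which is usually not helpful for Stan
parallel::detectCores(logical = TRUE)
```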
## Quick summary Assuming you have a **brms** model which you wish to evaluate faster by using more cores per chain, for example: ```{r, eval=FALSE} fit_serial <- brm( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 4, cores = 4, backend = "cmdstanr" ) ``` Then running this model with threading requires `cmdstanr` as backend and you can simply add threading support to an existing model with the `update` mechanism as: ```{r, eval=FALSE} fit_parallel <- update( fit_serial, chains = 2, cores = 2, backend = "cmdstanr", threads = threading(2) ) ``` The example above assumes that 4 cores are available which are best used without within-chain parallelization by running 4 chains in parallel. When using within chain parallelization it is still advisable to use just as many threads *in total* as you have CPU cores. It's thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. Obviously this will reduce the number of iterations in the posterior here as we assumed a fixed amount of 4 cores. - Only apply within-chain parallelization to large problems which take more than a few minutes at least to calculate. The `epilepsy` example above is actually too small to gain in speed (just a few seconds per chain on this machine). - Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis. - Due to details of the model and data-set, speedups with more cores can be very limited. Not every model amends to within-chain parallelization and an empirical evaluation is in some cases advisable. - Enabling threading *usually* slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed. - Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores. - Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive $\log\Gamma$ functions whereas the normal likelihood is very cheap to calculate in comparison. - Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel. - With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable. - Avoid using hyper-threading, that is, only use as many threads as you have physical cores available. - Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort. ## Within-chain parallelization The within-chain parallelization implemented in **brms** is based on the `reduce_sum` facility in Stan. The basic principle that `reduce_sum` uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. **brms** leverages `reduce_sum` to evaluate the log-likelihood of the model in parallel as for example $$ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). 
\end{aligned}
$$

As a consequence, the within-chain parallelization requires mutually
independent log-likelihood terms, which restricts its applicability to some
degree. Furthermore, the within-chain parallelization is only applicable to
the evaluation of the data likelihood while all other parts of the model, for
example priors, will remain running serially. Thus, only a partial fraction of
the entire Stan model will run in parallel, which limits the potential speedup
one may obtain. The theoretical speedup for a partially in parallel running
program is described by
[Amdahl's law](https://en.wikipedia.org/wiki/Amdahl%27s_law). For example,
with 90% of the computational load running in parallel one can essentially
double the execution speed with 2 cores, while 8 cores may only speed up the
program by at most 5x.

How large the computational cost of the log-likelihood is in relation to the
entire model is very dependent on the model of the user. In practice, the
speedups are even smaller than the theoretical speedups. This is caused by the
additional overhead implied by forming multiple smaller sums instead of just
one large one. For example, for each partial sum formed, the entire parameter
vector $\theta$ has to be copied in memory for Stan to be able to calculate
the gradient of the log-likelihood. Hence, with more partial sums, more
copying is necessary as opposed to evaluating just one large sum. Whether the
additional copying is indeed relevant depends on the computational cost of the
log-likelihood of each term and the number of parameters. For a model with a
computationally cheap normal log-likelihood, this effect is more important
than for a model with a Poisson log-likelihood, and for multilevel models with
many parameters more copying is needed than for simpler regression models. It
may therefore be necessary to form sufficiently large partial sums to warrant
an efficient parallel execution. The size of the partial sums is referred to
as the `grainsize`, which is set to a reasonable default value. However, for
some models this tuning parameter requires some attention from the user for
optimal performance.

Finally, it is important to note that by default the exact size and order of
the partial sums is not stable as it is adjusted to the load of the system. As
a result, exact numerical reproducibility is not guaranteed by default. In
order to warrant the same size and order of the partial sums, the `static`
option must be used and set to `TRUE`, which uses a deterministic scheduler
for the parallel work.

## Example model

As a toy demonstration, we use here a multilevel Poisson model. The model is a
varying intercept model with $`r N`$ data observations which are grouped into
$`r G`$ groups. Each data item has $`r P`$ continuous covariates. The
simulation code for the fake data can be found in the appendix and its first
$10$ rows are:

```{r}
kable(head(fake, 10), digits = 3)
```

The **brms** model fitting this data is:

```{r, eval=FALSE}
<<model-poisson>>
```

Here we have fixed the standard deviation of the between-group variation for
the intercept to the true value of $1$ as used in the simulation. This is to
avoid unfavorable geometry of the problem, allowing us to concentrate on
computational aspects alone. The Poisson likelihood is a relatively expensive
likelihood due to the use of the $\log\Gamma$ function as opposed to, for
example, a normal likelihood, which involves far less expensive operations.
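To get a rough feel for this difference in per-observation cost, one can time
the corresponding log-densities in plain R (an illustrative sketch only, not
part of the original benchmarks; the absolute numbers inside Stan differ, but
the Poisson density is typically the more expensive of the two):

```{r, eval=FALSE}
x <- rpois(1e5, 5)
mu <- rep(5, 1e5)
# repeated evaluation of the log-densities to make the timing measurable
system.time(for (i in 1:200) dpois(x, mu, log = TRUE))
system.time(for (i in 1:200) dnorm(x, mu, sd = 1, log = TRUE))
```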
Moreover, this example is chosen in order to demonstrate parallelization overhead implied by a large number of parameters. ## Managing parallelization overhead As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller *partial sums*. Creating more *partial sums* allows to increase simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each *partial sum* formed along with further overhead due to splitting up a single large task into multiple smaller ones. By default, **brms** will choose a sensible `grainsize` which defines how large a given *partial sum* will roughly be. The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler is giving fully deterministic results since the chunk size and order of partial sums will be the same during sampling. While we expect that the default `grainsize` in **brms** is reasonably good for many models, it can improve performance if one tunes the `grainsize` specifically to a given model and data-set. We suggest to increase successively the number of chunks a given data set is split into with the static scheduler and run this on a single core. This way one can control the number of *partial sum* accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers as we are not interested in the statistical results, but rather aim to be able to explore the tuning parameter space of the chunk size as quickly as possible. The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details and the easiest way to determine this is to run this benchmark with multiple number of iterations. Whenever their results match approximately, then the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix). Below is an example R code demonstrating such a benchmark. The utility function `benchmark_threading` is shown and explained in the appendix. 
```{r, chunking-scale, message=FALSE, warning=FALSE, results='hide'} chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ``` ```{r, munge-chunking-scaling, include=FALSE} scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ``` Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increasing further with more chunks. For models without many parameters, no such overhead should be observed. Furthermore, one can see that 25 and 50 iterations give similar results implying that 25 iterations suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that when we start to use more CPU cores, the overhead is easily offset, but it limits the maximal speedup we can get. For example, some 2 units of computation become 2.4 units due to the overhead such that on 2 cores we don't quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup. Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program *without* `reduce_sum`. As we can see, the additional overhead due to merely enabling `reduce_sum` is substantial in this example. This is attributed in the specific example to the large number of random effects. ```{r} ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ``` ## Parallelization speedup In practice, we are often interested in so-called "hard-scaling" properties of the parallelization system. That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it's not useful). 
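Before looking at the measured speedups, it can be useful to recall the
theoretical ceiling implied by Amdahl's law discussed earlier. A minimal
helper (the function name `amdahl_speedup` is ours and not part of brms)
makes this ceiling concrete:

```{r, eval=FALSE}
# theoretical speedup when a fraction p of the work runs on s cores in parallel
amdahl_speedup <- function(p, s) {
  1 / ((1 - p) + p / s)
}
# e.g. with 90% of the load parallelized, 2 cores give ~1.8x and 8 cores only ~4.7x
amdahl_speedup(0.9, c(2, 4, 8))
```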
As we have seen, the `grainsize` can have an impact on performance and is therefore a tuning parameter. Below we demonstrate exemplary R code which runs a benchmark with a varying number of CPU cores and varying `grainsize`s.

```{r, speedup-scale, message=FALSE, warning=FALSE, results='hide'}
num_cpu <- parallel::detectCores(logical = FALSE)
num_cpu_logical <- parallel::detectCores(logical = TRUE)
grainsize_default <- ceiling(N / (2 * num_cpu))
cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical)
cores <- sort(unique(cores))
grainsize <- c(grainsize_default, grainsize_default / 2, grainsize_default / 4)
grainsize <- round(grainsize)

iter_scaling <- 20
scaling_cores <- benchmark_threading(
  model_poisson,
  cores = cores,
  grainsize = grainsize,
  iter = iter_scaling,
  static = FALSE
)

single_core <- transform(
  subset(scaling_cores, cores == 1),
  runtime_single = runtime,
  num_leapfrog = NULL, runtime = NULL, cores = NULL
)

scaling_cores <- transform(
  merge(scaling_cores, single_core),
  speedup = runtime_single / runtime,
  grainsize = factor(grainsize)
)
```

It is important to consider both the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single-core runtime is very slow, in which case speed gains on more CPUs may look overly good. Considering the absolute runtime instead avoids this problem; after all, we are interested in the shortest walltime we can achieve rather than in any relative speedup.

```{r}
ggplot(scaling_cores) +
  aes(cores, runtime, shape = grainsize, color = grainsize) +
  geom_vline(xintercept = num_cpu, linetype = 3) +
  geom_line() +
  geom_point() +
  scale_x_log10(breaks = scaling_cores$cores) +
  scale_y_log10(breaks = seq(0.1, 1.4, by = 0.1)) +
  theme(legend.position = c(0.85, 0.8)) +
  geom_hline(
    data = subset(ref, iter == iter_scaling),
    aes(yintercept = runtime),
    linetype = I(2)
  ) +
  ggtitle(
    "Runtime with varying number of cores",
    "Dashed line is reference model without reduce_sum"
  )

ggplot(scaling_cores) +
  aes(cores, speedup, shape = grainsize, color = grainsize) +
  geom_abline(slope = 1, intercept = 0, linetype = 2) +
  geom_vline(xintercept = num_cpu, linetype = 3) +
  geom_line() +
  geom_point() +
  scale_x_log10(breaks = scaling_cores$cores) +
  scale_y_log10(breaks = scaling_cores$cores) +
  theme(aspect.ratio = 1) +
  coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) +
  ggtitle("Relative speedup vs 1 core")
```

The vertical dotted line marks the number of physical CPU cores of the machine this was run on. The horizontal dashed line in the absolute-runtime plot marks the runtime of the model *without* `reduce_sum`, and the dashed unity line in the relative-speedup plot marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count beyond the number of physical CPUs; hence, hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, threading outperforms the single-core runtime only when using more than 4 cores in this example. The shown `grainsize`s matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive in this case, which is attributed to the large number of parameters relative to the number of observations. Still, we can outperform the single-core runtime when using many cores.
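As a small, optional post-processing step one can extract the benchmarked configuration with the shortest absolute runtime as a starting point for the full-length run. The sketch below assumes the `scaling_cores` data frame created above, with the `cores`, `grainsize`, `runtime`, and `speedup` columns defined there:

```{r, eval=FALSE}
## Pick the benchmarked cores/grainsize combination with the smallest
## absolute runtime as a starting point for the full-length run.
best <- scaling_cores[which.min(scaling_cores$runtime),
                      c("cores", "grainsize", "runtime", "speedup")]
best
```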
The most important advantage of threading, however, is that with an increasing data set size the user has the option to use additional hardware in a brute-force manner to offset the increase in walltime.

```{r}
kable(scaling_cores, digits = 2)
```

For a given Stan model one should usually choose the number of chains and the number of threads per chain such that their product equals the number of (physical) cores one wishes to use. Only if different chains of the model have noticeably different execution times (which they should not have, but this does occur in practice at times) may one consider the use of hyper-threading. Doing so shares the resources evenly across all chains, and whenever the fastest chain finishes, the freed resources can be given to the chains that are still running. A sketch of a full-length threaded fit along these lines is given at the end of the appendix.

## Appendix

### Fake data simulation

```{r, eval=FALSE}
<>
```

### Poisson example model

```{r, eval=FALSE}
<>
```

### Threading benchmark function

```{r, eval=FALSE}
<>
```

### Munging of slowdown with chunking data

```{r, eval=FALSE}
<>
```
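### Full-length threaded fit (sketch)

The recommendation on chains and threads per chain can be turned into a concrete call as follows. This is a minimal sketch only: it assumes a machine with 8 physical cores, reuses the `model_poisson` fit and the `grainsize_default` value computed earlier, and the chain, thread, and iteration numbers are placeholders to adapt to your own hardware and model.

```{r, eval=FALSE}
## Hypothetical full-length run on a machine with 8 physical cores:
## 4 chains with 2 threads each, i.e. 8 threads in total.
fit_final <- update(
  model_poisson,
  chains = 4, cores = 4,
  backend = "cmdstanr",
  threads = threading(threads = 2, grainsize = grainsize_default),
  iter = 2000, warmup = 1000,
  seed = 1234
)
```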
kzԥW.2.=/צSpKǁsҥ{}KO8/_OpL}_b~.}~\KwZzn3v탱җΜ/CgN\su9 n;]:ok}t֥Wpcҝ?\u̯MNߗ]z qQ=ҋOӥ7Ǻ99+ƭcoi棛CGqQ-ҋg^:y%]#t̯8 ]C W\܏{|>ُKW~}ҥuһ?̿\/[^}||WӅ빲^8ԥ4 ǣK7?oʩ\KtLpӜtz1WLꘜGWzd΁nzYӝswùYk3|j9ᠶKO>s}gН#D^m_K3/.zH]#/}Ω5gҩ!p]Mw^c3?]zn]#k^t:<]zϟᾖ3GptӼ'1KnN}y8ߕ.}8zGwh>YK~Btӥ͟'ѭ~m:ulw sQ9תc7/}1е〻>Wt[zDZy>+Qy+K_z\h:tI}ҥЩ'>QCw g{^tm硗~4/1;s)ydP|?w^ҡӇ߽nYNp+Mzcr}\?{\;K7Dn߽KO}tgy>.:]K<f5_;/=ﳏ./wҟ?\zѡ~R&G"];G^:n~9SI}Q˼tͳ^s~\YKϿff:v\:ǥoKsz~Z^sYO Kԥߺt3o}W ǟ.=2o?׏K2/=/4>>q:sߙ>uN}tۤKw.Y>:K)]zgtrxҥ7@nv1u^Ol^t-?ǥK^tg^덝~.O]z1h#4u5zGe<^֩9ޟt陟N=IutGO~_gߎ~~oHwo?w??_G ^q?vԹQ;{Y~Mhi7~peqC/MK75{yùν#_y7n|K zpg :!7}wv8ߘW~S i+wy e\۵ߘv82} }2ӡ`{M[?!xM~=[~w:*[g1_{=NϻoùZ¹oo{nW^Խg_{}so?k?{8q{-t=޿x;^W|o?:_Ɇs/|pM+:6r\G{p-s}~}|w}\~ νw>'{>{}9~g~z>!{>msŸt<9^N{>νU; \8?[?W|_Ésp17ԯ+mэ8sq~ ^F:w=L-/>'Wskͯ>@3~^GYщ_1Not#޿+}J_}sӹx+ӵטͯ}|_b~5/8{~խ?k1Ͻ: ;;c÷_ƹ];kսr|˸Wq<:s3_=^4| w~u~ }җ~>Rxc> ^F/U=}ϷW~g̾;oq{K\s_d\^F3Y37}ߟp0~:v~3N]+>p ^/{<:|>ӱym^9=wϘ[2?_}7z#p<}{p 羿o/{squϢSz,;[|?4c~^/O̯ {|W=g?b|ͫ_1^+gϊu3C8xӽט/ߌsb%⿯_^[\oׯ85\D8ҭ?7ֳNj~W8燫~Az7_c~;t_uss/6hx82=G`ù0,{>:~_o?b3 ?*W2DZ糇s/pyùqs>yb~Asq+?|f^L8!t¹/p;ʸ|#|Vt{3];=xN"{^o)s9{:{j|cwtK,ҹ)ߌgx?G-Nùsw+|FռpzGp|7pϛ;{>G;pysחq9|Vtz'Ƨs߿ùǿg{~{܏߱5·s^/{GpFztysú;g|w~ս|qѱ>ߌ[䓷_N~u`_]>8t̯#WùùVK8w~c7O`sO8/Ĺk?U]>{'b:˰|fd8zL?Wqe=cz~tϺn.'{:pN>cu׵p=Oǥ{#6Kؙ_8P8z ^_p/{}g8?~ѾVb.~\=׫8߿qqC^c\u5]tr~8Y}t#Gx+W`wK^{tmc 9G¹z ~.ƍ%{><;Kg\\/ :} kpù?_cZt5G蹾ǹq~}=>byuZ>yùaec2w {> <|>Wew)[S?Ooskj1ņ1+[v㼟sϟqKԭ_}|>֛KܖK羏p#.^s/> usM^~~|>b{=g~]s7u~n=OϟoX_sqYc:1~s7qts3ߟ? py?x8w?@)s|d>W|+7w~WӽXo>ֵs>[8wg[8sz^ōG};z$Ÿ?_>:sq=t97:E\črc:Ky8u8wҹ>[b={8w4? ϧK? ^,ƅ]{1>=W_$8p5v̯Y:x_z1fұw cuùŸt_[b+5'5ɯz͑N>5G:~}_qGOǿuڷ1xqc֯{^=xt>{pyG;p{Pi.-_9_szG'5<g?Xኣ^3rz'qwzMs/#׬Ox񅛦^6;5~>z!>8z<]sgXu׬'p{=}/Wke8}>[5];E>pkV?ǣ¹pP3ùg{pz=¹gݯs\c?}_sOY/^3+zM [_s?q?k8'ĹsxPk'^<^g{q~uk:~t_sz:tOL]:p5WsyŘv8¹y]߿ǹE_5K~=O8s=oxp~Wk6z=w.a^/=gqǿG&;8/qex.5?~=C8MfF~'=}ϳw=]pYs=לqP~~w|­gީ[n^ӼpŹg} =?ſ\^Q{cq~w]_=}~k櫳~w;Ï7>\Dȼi0} |{0c~:z5} _u7ݾQ|sM~s8ù?~?>/Wפ=q]p_u5G |p5yYY|Yxֱ3޺wƘ5KQu#޿g?n}_?spz#45s8:8}}=ةo"<5{טo¹Øzp ;"ڮԱ3=o]sO?s?^kzQI]o#oqK4] .}o \4~|Q'89ҳ>åk|>p/ts]z:\j]ĭ[3[wu Μ}N q9}=O7Х3ӥ]ptXҥ߾3]q扥S:J]׾xOKàK7S:e]tsr.o>>K%tt\K:tg.ѩ/}ήեcsY>o>"y|"bܭNQwS:"]ycXwXu ‘ҭ9\zו3Op|KnBޗ?.ub};$}Ko:uܧNN.=ԥ.6|ƥ7os/`ͭ3b]c8..1t:}tpqy|ҭץPY~ϙG>b8Z]e=2}쮛u KOut]zϟԥڹ+.}Fם+>R'[_:taSwKY~o:l|\7 9g~Kg|>ggӾW _Y;g~%<9K.]5xrrL\t+ңuQW/Yw|NߏjqK>}t\һ}tt]:u鿿b\҇Ow~%<]zӡ>tsr\}θХdtG8\z[{1fuT9.g?o.w>'.p8:,7ag}.}zK\H.qW9n}Z]z_K.>f>wkgsBvs>'u`K|$uc;uu:^/ cW><]z_Kusz~ץ_۵sfʩ?w]z|Ks}qͼǾjg3l۩~ՕԥۗKx<9쳁_Μ}طt׋қn}NvQq>S'KKҥK7_L~sv}D9uW:]Qu?uMϩsѥ3.<眗y>.=tο{q髖NkeL/9OsåХեs_/]uWsFݖ.1O~Z{ѥgK%`>2FwKә~v\zqL]ĥCǙD].:6]z|vtoǥ*]:xtӥ?۱Svǥq/tK|)gR7Kؙ_q:G>'993:\s~đwWq.}\~>1 qy.q\uۥ7KYׯ[ҡ[q\~n1?ֹo3u̯_ҧWn..#."]0_En^.]WK)҇vt;҇s\.}߮?ǥftyF].׍K:k\yRt]-.K7_PkѥqYKϺa]O^|}KzIХwG\/.}__qԗxqǥ>nv]VǜϪ?OҭťoХg]?.=?].]gK7Z~T=:s 8}t̯7g9]z) i]znqׯu:tLCLC7% ͧ}"<..}tvqYK>pӥOϺ.=ݪ.>:tot<>q&Cm]zol[꘾/l\zba:tߥKSKסo]va~.ݾ ZS|:zVn^.@\z[_9.}KϾ tt.ݺM]tݾ[NåW~>sK7zqOto9}tt#$,}qn]MNǟSKSw>pL>u߷_n:؇1s\?3x5֭xw\͘>V1u:8u13o\tӥ۷at}|vgq]7.}۳`NGKU̯ǥgK:z1qaߗ+ZםNtXq2~< nmWک'3Otԓҧr?.?t2ӥ{LM\z'F'.};￞.=]..=.=Ϸ$KWnߗɸُ[:uIѩsҙ_f̯8Ϲ?KpֵҽK.='KOgKw~ѥEH7iߗt>ɘ;.p帹Seqzұ~.}t_\uº\ҭեDqy}KJ7ş~.}ҙ~Et8k?t҇Ntsi\v 7̫}9nYOμ}1[~0.=83<\z~p֥G_2|x<ҷ \|CK}åwҽѥK:I]}op=.=g\zQ\z~uo{qyKn]ttu_[gҽ¥T~]9X-Kg^g\^:sᦫ}_pkz}KI~Oޜ_#qycC?Q'rgztWNtzts?H{]7\ҧIt{\z¥OίcҭN|dСNJ|9:t0L,/ts=K__^19B|>gR?.o[y3XT}zM\v5zйؙ_oԝ^xtOy޸['ݲ&}t?Gҽ?]:NzM3֓tUsl_Ky5t~VNqs0.5?.~|2^8ҧ}3qzxRxQ|I~<\z+spYuN~op]q9 y3GG5;c\zӡ_uT\4e+^S][^+.LwnŘp7̟7CvC?OR>i_SJn.|V]9:qptݝ.]wK//ʜ93>:sg~~t}]9}_N>9vx2?wfc8]:o5g֥rGg^x*c4.<5Μp;oίß_1_K}}kBnzͦzͥ+gz.}tv|9^tlaLP/t?ׯKϣ.}uþ7ץ.ޗ$˱*s?psKםnNn.mHq))Y! 
GKv:}K~`l_p}=|ߕn}#t7=e1_3k׼t^K֓3ť[.tҧ} ptmS?uї.]}gtڋ}6.fȏK:~mj^z˾/q틠K/yХЩoIgq5xYW\o.}\:^sq֥>ҥfP\?sƥ֥.g.Ofnz͢S_}_.Gf/tߺtҷAN7]ߓKO^2fld}D->gA]W[㔫>gstH~>'<99gJgN\ѡSwKω>/.]wKwKt8uoKϣ GԳfY.<'uút.:t|$:/9.`CN~7qa:.3^׾M[ԥwy鍱.1ә0xǥCuҧ.[S:]uF/R޶SVy 9uҿyKtiKǍKߟWs6;xfا˼>:|[Nk:t"t5q},/]nѥOb<}եWn.}oKϼ3wt..=ÿ7ӥ:st.ˁKw^KһΜ:qfuw9:?Xҗ|XwK+O:u1u|><;gȷK}>밮SuCty]K|+|`qܗy8Knb܎`o^:ӥs߮þ.5]y/7N8dM0{ss.=׏ϼ:;ǥO]3yGެIw=7Lps^:t^ly#.}Ltju]w>Xkra|>ҭKOK/>;]|'׏K?gsXyv9u|3w^s a^[_J>3s>gtS.=_:;׋10^OsNh=us/rqpԹKnD%qݜ/y7#țufߗt鸙K.å>';_ӥWy;Nt3KY?.=_>b}}ttRt^ߣocNQZ}{s WSwH>K3.]nkgLߗ3g+?.: ]uHaKfͨ3Х7qžjQa^z36:NC~_s:u>~1.r<>/vGם.w\:v`}8]} t陟K7̼tKϾsKnڼtmӥtc99:]Wt ..ƥ[K\zeltһyfsy}O8ppK9]ǠH_sf>ľ'}N̝W#>;o]9WޛLEnac}QKoU҇yۥ4/}mǎKu:tugץ{KߴNg9 } p n] .wqsUwNk鷝y:Kv.ݏkD\zμt\2.:h\zpK7_P^uuKwN]Щτ;Ƭ_ǥ['KIμ~O^;Ϻa]n_Ƙɏ(޼̃ǥ7sҋ{<⨻nq֯cݎן.=q֥gK|sg?.=]. uw%ҽKKo>?{=K?g_y8۱O;4/^ྯ;txKg.q}K''/]Kϼ+uaË}ctb~֥]sZo3/ԩ_oUñ7̏Ks7qӥ;A~.pu]z˼CЩ>e<t~¥KKZf~FϺt/?tc0g~:k;/w7_-cCqռ5ҽ>yQWK7}˱W;y8Na<^rq?Ϲ:oT%:ާK8w^}t3ӥ.ݲЩL>.:O_GD楷=e:#{d:?w;.kt: LW\c\:t>C1]zُܾ]UK/\z~t:>|7ҏqsM-upۥ_:tg:upۥw}!O:K|pҧy?汧K>N>o_qEN}u:qn_\z:]_t݃.]z_I|?KqK<]Kϼt~|Kx lKZ]./ͷztUgl_72/=O<^q>#ťtһy>1ҹyi9qa4.]7y麟yg^z>\0_S{d^бSNútrKץͿťk;sXe~:$8i^2~ٷ4\vϧ8y8]gnkelKwcKm;Ǚ<>>`.tU6:}p׸tKg?]>Ktsq.tKwLn+_5/}.}89]nKA^zٓyҺteuG>:n.{=:Xȧu8zCOd̗t/pz]܏եqϧ>tlt9e_p/0>3ytqP9]zGl7.?:}g&>yևKop<[J|k>Nߗ3:楛Wd&okzͼKo:לoqŘ>|tҥY.~dOȢ3'֡3>2o#K2/S~.}ryUw~~HtOKK_>N=IuqB.֝Gf+5ɷNuG>8Kw>y~XYs/'۷T.럕Wk^}Wqty陏N= n;sc wҥle'|嘾j!ίɫ<\z\[[N3f:u?cIxS9Bӟ_?{>.|5^39B__.K?K>085:.'^0f~=gҩ#tίV:sXᶫ]Nߗ n=v{!yH^q/K'<]x}8spp}t׸Za3It8W>5'ɼt.}?KǫK_:aeSKtԥ Kݺswuo}bLf$za8$tnjGwp|tE_]/̏K7wt֛שSs_KYI_#G{U|zΙ_9ȼz[תC`ҩ7ѥof:}_G4dǥ{$]S_]tǥKɼtr3/ӗt^9^uk;yoۥW]99Bjk-SmzM=ґ^t}yyטC'G^]/9Bq>ĥGj_q҇3^v׮#^u|kk~y-åg9}ҭ[0kg k;urp_ONK.םN_ҩӇy|3^u_^7sn:b>¥\~mN=Iթ stn^._uL߯Kg$]|K> mVt©W=DqǠeSg\z.}??snn}.K~oW_ӥ䥯?]zab8&\`ɼteK<c$]TN}Φ#z1ߺÕy/o޺ua1QW.t9oy WmSbLx楓>'.GS}瓇Μɧ.}.uPtVK7UoX']}Bty<>NáS_GDt>v|?.pG^zqtǥ} 7\yӥ.e>gۥ:¥[K.+ҩ3ե{BvE46;u:88gvG\>N>xt>a?50]z\ާKo_>b?\zt/<.=]usKƥйoCMM:1ťO3/}N'o9c7o3Cξo\:yߙο҇N:j:>_.餳f3.801'9zV^w:yq~եӏ/]Ko_\z\tݫ.}y7/t 3/ݾ4W]~ҝu󛗞K]zq9;KzKå_vIqz3|]>ʥ{_-]:n!]:.YKo䅧Ko:_xNz>bӡs2_Kw_%]:u n ]:қ.[wHzw_ _tGeVa#=9μ}˼n_tt.#/|s[?o^Μ9t3K/|uw9:t|Ko?>Vq_q' q޷N7w;f~%;ovp޹ω[.Y['ҭ3/aWע;N'bS] o:Nyǩy>.]wq>ԝ32K|z]z1[utþ/w:b\80\| ĥlߏtşy=}xӷ*ҭԥ[7Kw?K'O=}ӥA.}[QcHv97]ٯ?]z潧Ku;/:]}2/=Wӥs]z̯g_N쫖.}1_^v[K.:6̯ҧ^5.ujwN'Gwtpݾ/QwK?ѱ[oXGlKvS3uq2/}W |KSҭS5/]]q#~\N\./K{uűys1qe^z:˾/˼S2o`^ӷkӱϯOpq&NQ7tѿK|Kw}.]b^N`祓ts99]zS^yy3 틂KA?r}D^p~:CKϺ}\z:g\9^]z_SyùM_KOLJKO]:yܗ}6pǗku8.q҇tK[/]r1['^̛:3?"u?tҥ[Kvqԥg^w>uCgN]D߲.0ƥ~GTG8xۼ.}^y8q&N!ǰOoN#/?s%~^NT];Nqt?~ye^zpKhy\:sgtϷ3vp3N瓗.Μxt鮟Kkч#us:㬋}>.=].=?uSsTn]/mYCqa\5.֥m^zί3.y8 :u{UwK_K]Ett]yKy}|]:Kk8f~ t7.tҥot}_pθå|Kw/]:8tb+㙗>N?.b]zO_.>߾jxt3/8.nt鹟8v^zal}bl=wvCy鏯ttt~_8l\z:\O^ut?ӕ;NƸ}|׿K?7<c3/y鷯.yKХGVyvNgNAӭ[O2yN_cY9,KqKvԓĿ]z2Lp_cỴzym~zMuS3_8⪋_3?zx<5G,K'/>]:n}ׯ#O~3/;f!_]>^S>>%t].v䯧Ko:t|dfǺtuҭCץ[yԥS_K]zϧ$k^7p]z:tyu˜eksl㺹1qtMwKo:޾ϟxK]'btҩ/ĥ-.=כt楻>1/~o\Oʮ+S70I"ځLpf'Ɓہ_NH %{^j(2o=?[G7WNޭK \zK_n]]Ÿq/}Yv9Y:u_C@u`K<}"NOK2>ϯX?<R.L:}pԙo_O> ³_zՙ.=.å7^Μ8?Gtÿ/]:n|g#;_1£3o||MkӥOm]p_KQ9(gx5~c]q鬇ҥOyq&>s>q>N|ӡ^(:t?:t.OyJnh]N{K:v wqץssեf.]7K~W{p͙Yu>tǥKo:tIn_Otl>M!vq}үO.|5?'i/ԩ$o~Kt%<ԝ#.It^K_8./M>7]듣_zwL[3cKtzk.p٠_|k_:`..醳УSgGWvǥ['EDCեԩѥK禇h'$tqK]/|KҧgZtOn_]tEGQ~հ_:}ҥKK?׸t>BUgNtcINu,tȧ1/:ҭK7$gw\g&|t˾hK.x.}.lNݗw~W|ۥktϧ.<5 鋒.=7aѩѱ[1^#d](\NӹG/ݺ1tv1?;xwS?o_G?7Z˿__w;йtuv7s_^t\ׇ ^&ù^-{UMmstu8߸3 ^Dz_7n8~xqez?'M?wk87MkU.%{p}յco#^_^^لs}^Mb';X77S' ];x/zu8.׊Щ3ù7c?W|uq?7n;28:/3pT7tz-T8|L=7|/u޸_ތ~39V7n{?E^U]U8~~Vte߇pߧpx1~{~oܴN~8>q._po=_q[ѽ1?,On<Х8_ xo{ 'b0^1N^&kGĻk8pupS_qz|8K߸_qÍ v_:*q3]b|3?8~|U=c/qb~Es_u¹Q7${mcg}_~w]c~zk4oL~u%oί} 
?px|s'[~w݌k>sW{?Wqot¹?r3νM#o<^Ϻ'?co;^t̯o돺=WeWuܮ}z5Οׯ3؄s/=G>w~{\oϯ3^qNG~z\q?Z.;kѱ_-1V]vm|{-ùY>/k<s3*]+/~-۳'87?ޏ:{o]ùח_٧Ǯk:#7z}߱^5sgo~]~_¹} y{]Ysx;{]k<4sKq?~+uFXڿ'ֵ?qKߊx~:ףyO|t{ p4s=7~t#W?~f݇pO8zaLνFXoot#x?+֯p_ƹzx>uU}¹s} Lgggy8} ~u=8?>Z}MN<9&cqqsx W|_ss#p1uj¹3];_|k93Řux|W-W|ùpq}ùw8wϖ.8{.ί.Ƈpq2ztd~:u5 . }p-Ww8߯pW̯¹dLo3.Ͽ }|׸~¹G|#{?[:ׇ{$lw1~8nr~xL8w|8ws|#kù3ν.׿];$_q=WT~8>דz?=#8'f'Wifs?:wjpϛ1گ9epzf=;p|q y0Y 83똄s_ {>|;W׸Y' ^sv֯7h¹׃cqo~S8}8z<_w0{?_Kt~sg]6Wߨ#p{ >87=~:N)5|y^:l忟1^9]gKYR'`x=ùpίp1fk.~빟?^.5r~z|I>7=98oW;{ͯcqj5z?Փ{,|g5:+#_x2=Gqǹ^9̯79sp1^~3t7=:8}5u|/ƃqs%{{58Jk_8ǘ+01"_{ޟqscY`kƬ_yz: KX?ùKS;+.ӯ|M]n-Us~ĵ#&_s;5kXWө$}0/!_ r+_#_s|ONf׵8yp+7o}|ͩ"_8uw ЩE:u8~bϋ|M[{錉q׼>dƹo~,'/2>|z<߿_~z?2s/<_ >s]8y3~6tć:tܯs[spse7#wC'Gfx\?¹zmkz<0O?ί֙_txps? /ćsx8q1f~:uWtP_u#u5[kN;8~qϹ>>3\q1> s;x~we88zq3&_ϖ}GtXs >|Mzweuqs|U{gӓ3sSq}߅|͜o5~a8<~75/|h\z q4{9NӼ]z_zՙd?unyǍKo_;$.]wKtͼ\{9g'OIw~s^v+)ҧnGYҍACg7u}?[gWqNu鮋uuBq+up˸tEҥ_83\z.q{JNFtҋ/]rb~|Nn.}R/O>oK ҳ_x9-W p9qK!Gt餛nnUvp8˼[%yeotǥۯug<]~~¥.;֙#Ι<tc1n.t1oKoۭyҭ]åAsҥ{"uKׁ> d3UCu?צKtE7w.8.<]Ϲs|ӥ nLny<y<~9ǥs80EL:qΩ3g6?嘟pK:"X'Cc?{t;:1<7Kw_V\zuѥY^vti\:qtӥts.ɃIԥg qNangKq2ҥg.۟.}PZ.]WKOqeh\;̋Ns'N3~>q[~븉s6q:S'ii]zå[gq7ϣۥs<ӥ'9һƉscs.nһλz?=W`nu$tCGKw~Х^z n\R1.}}5.8qβXҍ ҷsts1.\:yҫNc<.=tu?\]>Y/LL;6w'i]z8'4t6x.}XJ>\ ..~|t.}fgץg?p\:tCKN&1O_>bl~\tӥs.]'K7_nqN~qN_җys+- ۄ֥w K_wť;̋5‘܌;LN3pKWKcr~#.K,:st.:6җyt+utڤK7>ι~-5_q}2|=u8?y:ʙ_c'o]E]X{|tGK~q:y:qg^.=.}ӥwn7>.]KK_:f\:_]2oLn]tu.޶SgƥO n.ǥga[هuL<.?a\).=k\yu|t"uYWK:u:I^b8bݗ>ퟦK7on4]z#w<$.}t%t ҇KѥSwQn|0]яq:qtuٿNgq֯ۺp:t}ǥ+ЍNKwKt q?zi]\uuusK.=쿾lt}Iwu۱.w׈/?.:bttԥ|Ko:c't>Yg#~ߴtvӹsK_>]J~d曱KzѥVcӥKϺCts}K7pԉǥ;')\HKn.=ׇ\_ӍKO]zK'>5O꺜.}ds58]uSqKw2]zƥg]q΋n.tq5On::uU_~:״nvo[g3v]y=u Kw9]5\Xǥ[X.vkq&t鿏ҋκ~/O\z>ҍwu쇎~>ҟ}t:O]z:}\:NL:cqtҧuqYNNw>;k]z:b|qqoҍoo.ZWU7}iGf]uWk⒛תk']v♯>u'q7sqrySNJ=Wy;Dtsa|ee&c^ҝs=]['xt^ť҇uzq[Ǚs__ҭ۪KyDnN$^_~}|/җK^׍K:j\zx17KtKt3/G.!]tWuj%5q䙯yέSp1k8\~~upw.}d[wNݗpYt.=ݒ.%P8ӲY>KK.}_oťj?a~u]'_sZ 5Eq.ݺ҇.|é~zOgKΜ>7'_8.}ї!]zaI~ubs5K.xO_љ҇7O~㎶KC'%_g!ukAե[Auuժn::s_y}7G?y\qt?}K}<~..P~x#5..ҹҥ/ Wq-]:u|c}k[YWMNGK^Kt­Cg77>.>KKvSyutח̛u".=x3_OߥK.]]>o_q] nݗCg#_Ñ߬KױnO~.=q#כq;sҥ؟c={q;K_z'}Rӥvqgҋu~\/KNAok΋$q>pKj]tkӥt>t?t鮧uK'>.ݺHtGuy~u.oaי36ć/[NJkӝOJwuS\~}bs2><72ҳ.}ӥy Gί:2NCtiZթOrpҍϥK/.}X>ӥ:s~K#]u~t֕ҥSYwkK C8n/qt3gM7m\c^N8'nq]V{tG5g3ݓ7~g}0]y!^3OӥOeҍSK_g'Ik;y٥[wйcPKOwNrL\ѵ'q~ԩ'<<>Nit+K/{|E:]YXYgC:Lw^}] pݱ\u3/q9I?~ܒ7ѥg]WzKz/zZҽҭWK__z5smØty:әg~w70O#o޺uצSuC\y/{oףҳ{tCK[E^WNz,^?i~>it]:[߼=z֩Qc].ҝtХ;Dzu9N}^u~sOn9]Sg~_w9yͺ/8ftsΘ@ѭk:tN}|KN8KouǸ?ҍkftҧݺ/:.}n]qΥ[k~sCgN^??;J~Yg \qtǩo}헮S~.ݼ엞uy1{aLt>sN9y:}_:yu-K_.~O_zӡSszuw~ՍnYN:n}sԥ]q8<~c<ҷ:u[?\:#ҏn?ߟӥ|.:c{xytGt].}Y ]yԡӥ/ҳ?.}gz3/}K:scz}-~'׋x_qͼ~eԥ?ۭ8/Ƭ_:CTćӡ[ץΉUΤ3f1;]XԩR>.::һ;\z[WCctyp7K/~K_^|qg֥ft靼/=LrzӥةA?~Y/ct3z;Nd>ЭS>ӥ7s_::V?g2uԱ*gZ?.nNN^:vRgañ/ :nzĥK?;ć.g]̃W]y Kkav3:>҇G\-Х{ե5:oe|8qoGLݺ/Ƹoݼ:.ݳ^eLK׾ks}ԥwK~ۥye{MЩc]mtUN|xO\z:/=엞~c`< ];an~ԥ;Mn]LNS~B|?tEӥ[o޳_z8yҽoN˺/E~QGB֡/]_o.~љ~-u3qp\K7藞cts>.Klt.[tatחۥ~_;'0}^ǥ>Х[jC7DbҥsL^<].\7.Oɋa\xu~GxKonZ[>K~8fy_/8:Vדt3^Kc\sIpMCN^?q.?\.=D/?|\..Y\׈mN|z֯88~niscǥ:ӕO:t~8ҥK7"]cҥe?botպjt¥/+u9_m'_3utok2_~etB7w]+_G~aKw?~.~w!=].IGJ^g<.֩7 c_ҫZ<ܓy~ϳPS3 ]:~rZuם}}[W '̯9~åsԥۍ-N|NaݧKJ~q#_33R Cѥ^>>"]Gwt8.}y0ⳏ./qytaukgԍtuN~Ǯ_c<̫\/\?.]K7;]e/1&>tpt]OnSKե:{Y۫Gqפy 79u'.cG+unұ~-:u|1S'W>_ǥ['DӬY<><3_1K~ҥFu3_볟_3Kҗכҫk8֫G[Н3޺sϱ?Wg.]:K:zētkҥ_A _/pKס_t>g:=~kf~N>BC~Wѡ'W]9wǥS'*]uv1}pG.=,ҩNgt\mq?v~1uK_.=_c~:t֯UN~Sӡ>]zoUׯ9c/gd]p;Wӟ׼~ԥ~}pӥOĜuK>BǥK.zῧsљXwK:t5ԥ[gdK>.=Gcf;'>Nҫ>5c]9$׸o>t}yG.}sf~QK_\5#_u99Μd?t✸uXե&yG~~Ent]p>.~ۥS7 =o~X)vǥOvǥ{}so~\3'n_3:r"ǍƘy/qL͡cg v2:A=xZu59yK>.~t\ӥ엾t/=\/=~ ХgL~MgNҫ3A9N{߻\q1ܣ.{q܏nnz9қ'.pJ:)]I~Yw`S\p|/z✸~ҝ7tbҥ;.y藞/oφg?ﺺ~N>baynK7.ݺ 
'K>ci8߸I=tίs~1u.}lNN׵o"dzfסs=:Oz:1ǥuKq꘼?sV:yqӡ_>..=*ѩy]zflu|E^}8Gǎ3-_ֱzNn^.}:]1{W5qǥ|K.wKKsK~39?.=[K'ι?G:u~_:v~uvt9~68]A.뾋u_Kt93I['9Х?jݗq<:2|DžKO>t}9qΡSgpؙ_.g}_s3;Ϻ!ҩk.sB;-?oE{KK_‰s^۩اiGs׎K_Cqsֱ2oKoWμ.|\y]`ZW:Bәu.B>ɓСSWm.}~y֌sp1ί8l\zg͡c ӱ׏3n:Ϊ+o8Ƙk/]׶㼛yqsgK/֡SW-=yEw^]Gt͋ЙW x֥OY'utχ~鬇zy_9ü~5u3OߋK|tOҥפK/UϾYUtiuDix\.:8\]?v1.7.{:Vq>un9\z/eK3yt﫮\uӥG_.=m]엞.~uguU:ɾXåg.*c'NәSjG|p1k{m.]yygs=G|c_[K^o0u pE~һyK߮XW]ӥo^uKu}ӥKϼGtK_:uY/|KOgҋc:.|\zcnG<:u.k_:q#_z~N}^Out^ ҳ:y:ǥ"tu)\`?A>&׻'Kn.=~;_Ա_N:]]N \2حqץp8t]ǎn+]uvtSK}=\z_Zӥ[@^t:u֯Ͼ[w+v헮;'/ԩ.>Lp0κ;~]y޻_:GdtGcgu}\3|y忟78.}K/=ҽ_uŸK..9.<_z:tln.=]xeuԫuK:;KcG7K'p1mN~鏯gkuL￾\z]S\z>ҭK:g[7 ]up#w~u?g|~B]dKt/:cKzƇos ƇMlaOq<ؼKҳAt'.=O?W]zn1ԩc:]utEKo8d:̯:]u_pt/ׯ0f.=/u1.}֙_c>ҥu:yV#_y#:¥K^Ot/.=.}XGQy^./=;KK5KtӼs5]u]p-y{t:uq./'v8~fRե3'>N ۖ|[ҽ^p~^ҳe?5~kѕn}~u-uLa<ҧSgS3]9uK3KNn~x=ntvt^uwſί1ҳt~wӥc\ԱSz ֩!NuJ 7K:#t8=T]zׅgpu_<ޭ;wS?0Kߨ//ҳ_kաӧ8]yj׌3s@ҥoǟ..tg/xkљSǪ׼.yq~Vׯ1֝<=Rućӡ5ućqG(c`L㙯[컱tt#æ o'yå_:q_:twyb|ts~s8u"/:p30t8}Fݴ[g~W7>B:uӱ#=]:wGtt95]':i8{]q[Ƈ?.:#ۥ:u t֥JN엮sե_?_:vI}Ytq|os|y~KNf|tMw몍K'K7=KhRә}p_zY6~r녢['_;8=.ҭcAåるps7S.wݹ]c\['$sGKۡGHgN>Iϯפuk|͢3>u_:S]z?KqefԹХgt}KN>I\KuetŕEԡ^=3~~ޯtֽХg?aݗK~;^K7_3ӥ뗲_ft~FѭEW쿾nǥ/.,׌Cnd:GCԝҭK_֙ҥyқ|q5ǥ /^u֯QnƮ_grkKN.>st___,#߾7_׿IϿ%${{OW?~Md m^l s/۷kی1^ iU˵Yc*[4 [i ܫ~N%yz se?ǃ5ϋimv0ʶ̽& iaE^zM)aJ̽خ^,=¦U sv6-̽6^Hj{`nv~bv^&l^#lZio's/2>{aa"[k]}Mcw2";d0J̽ wMkciܑ̽-s/{2 s/>>¦ղ0d6Ea}6-ɬ#lZ#lZ;¦U s/ÿ?¦"l~{=.zMi=̽v|M v{l'ar⃹s샹ױ_Y4{Viu+*r{rTQNy8#jZ W^U(bwX~}DMkADMݫ#jZ;ٝ`$Vab+8wfxijffC{=Z_l)Fbk;{-ϸ]Z$!N^%"7[@܋)L#dZ yKܫ)%jk{mX;w* ⾨p_;{ɂj}{1?^#o׷#^Zj~7Bkۋ^j( ^ͯ÷Ҫ·;~\@Vۭo <,nK^oxK/OJǿ W.,5GxqۋbD|{xi}]k߯t_qso_ K }KKϋp+5'oy{1=nP^:¥t xO#t{# }ZBZFvBvE=t{5^/f6إ*n/vEW=0N^bn/vGɨۋZ,tn_Hö/m_TRR%ݐ=/ de{8-! a+ۋΐe;d{c}梕!xCׇo2j&/ _j/d{5Sz-!ۋq ^3"+;SBת^DG+ )+ۋE9lߟbd{LhlvDR*z+]*Kv^}j%d{QN!kv^.MvCbG%d{+J :zkE~^(l{xkG&xUޙ=|`{Z^-$l/<`{Ab&`{i߆ l&`{)nDh%_2vCYJ^@SK(|S+bN|#wW,y̓ƳPݐ͸Sn ۋvU a{=\BBbCY`{!.ذ 9M ׺/µ/}*[P]bGGeP{H@դkP{1)^KwJQnQ{9{+u&Z@դwP{݂ȝ5BګԞb3?P{^-:jBP{b]{e@E j/E ,"^AŢDũ5@5Qqb2P{MګpAtEul*fkj/6WDբTj6P{.@u{ZElo2 \Lj/Gs+uqG|=U]6:V_bs@P{fګ2ލsm"}kfa+9BxM{/=i 4}M{^41Ǵð8]X&`ô57`ڗ%/#ֶpL kp_ehz37x!e@W+ +9CEF^z%T^nh/N!ګpZ^|S]Nο/D{"杢=u|@&6?5n~Eκz"BZuXQ!D{F4Z! 
ўVD{>H!KSh/D{!hϛXan]槢T$E{-=`)ڋARHh/VpB:^# ўw/DY ڋ/h!Kvў#=cXmnN^CWC^-h/@@/Zs1<{,S}\2^3@{`=oBB^6BnnUAz9r!wy-OSG X` n>^V@r ^,Gff5hoEKf&P^l4 h{nЫW)07og̬3lh/<Wƫ*#ݖ_d#3Ӹ&`' #3 vS+ēNjg}> [̑q+xԂyًsoꆳW !=V&j ފ73^,g g˫L'[8{u5-28{'G^kG^fZ^^m#/tbiP^j ^n>/8{6ޑYlSg7R^XLۢG^f3ZnΞAt8{5}SDZj|@^f ʴ̥noq_V>%cLDWsAg^yV@chu {<#-f/>n^[^,^f]LZMd)K`8>2+w/1{ÀMs쵉1a}e%>B1fbM#-zwe)T]^IdcYI`JXOnLZ^oq{ex%f6(f/݊{qLz"揼Lρ .5~_WY/fZ:1a݌?2M"Ef7L^* ^8=2 .ueD:^'fwOR:1{Sٽ T%y-.yQ쵉I$#fw)'f7*ftqp|Ybzo>E fKI$f/mcsbJ>1{m/^m f/bd0{o~ fID٫x^(.fy){vՃw?kmҎݧ11[bv@ד?ڤ^(&f/jH̬~Ŧ`qv7եuZM^&-{2kak,M̗h{ΎxR ~;3~ewDJsy$fGֲg sG<m"$.H̬Y`guI̼YX2 IURJ){Y,'/ز̤evڕA٫ [&dwCY^|dY Ǐ!wyx|c.\@:H̜3Ma& Mdž$f#d׬] ݃3@q3- d-d %rBru\ݑ q"dBDRyz6V?A-y.Aw|x~6H;S^Z)w%7$4Zx^~腬Ř","مK>qϵB:κl'<7/B> p3x|7>y/-a8sx2UHNjN\J09 ϵew˟NwmjN!),oaf6x!d"4w߱ |L3~vx3J Vr|a."!%p0!W>yϳ43g W|="oj[y3gܹYJt]LS9;s<]{8 FlvWJƘrԌOw^ތ;μ;ϳLRs@ϛ65y x3`n!Y=^xN@$94mKx^x(vs~ްNuΗeݟ63$/5 ~Se'i2[1-!;Jy+|M2MkkTMr;$mNԫ2韌{L }Tt2_a緭axPfbdbym[ymd Kv^擝7Yy@23Ɨv~2)"@pәXuQu~on"j9\͏:8R*Ƀp>ԹZy9ʸ:Pux:"YJ>.TY uZ&cJTŽu^#Y9syہ:"_, ؼǝjl: Q@Cx2]8GtPRΪ TyoJ2 8:Gts+nMo BvRc]PgD񒑷c`ya3g1qq~Ǭ8?<&+guL[e,\ "0w:GYAts|dt3n?L} ;ԋ`F^C'S:/6BWo΋PƠ[Gf 꼚W:Q꜏[un9չT6zFlt~I5SP@n<V~[M+XmpFM4V/o57O֜;_U)ը.Zy%od0fj*b:< Nu~mU ]GĈ0G]PRΩ:w#Xu^ZjRy*|9":k]fsb1UbM7*U>*:7 u^nY8M7<=꼊P/']Ջ]aݲ󴱲s(Ϋ]eDd癳 ;UMpsW;Ϝh Ow_!y o]9[5g7Wy^o}VJw^u.DFow_&Vw^Cu{^V61϶LM~=/Zg]a=OӁ=O=iDK=_^s9=d}Kί,2_8sܯ7,tqK]\*OM߬&oLO}^G)]i<͇ܖs-G0fӍ K-5$`Ϸ|sjSN\}3N$>bi!>$>&}))V5v*>=,FJh^A4T- ~>_:4x ץ&GǣQoW4bb5oEAϷ~}[/yWHN}C=ܴuGgz~̍sS>/P&J}^-[>ϰ<%dsPrJyrpn7}^헽y}3>WHϻ) :c ǩ#m}nE}n)+_u`Gnv^iQurLw7ebLX{HTGc}7"}+ye2UC~ERw⾧>/ݷ wep@?us[+M]z>} Cр2d4IB$C{ϨB2׳VfFFyͻ8.ڈ#5y?~^L̿~npƇ~޽:7,+n~xyGIyac|4UDN\/?(}9 ϛ]q9Jp*Ϝ>r}yu?|t*H? Ty씽Nyw^_9?t퓯̽ 9SY7s+p潟9wN]Ep_+gi 'q~[_&@ovߜߟ<N~.~a}f_Oyf~"?loV.fs/#ȅAy5}ğܹJ?N83MCՍn)teO>3ǟ?/>6=۹J>xŸT3M(NIq%Qn~ԙ>>Ns^O3^vviJJ(^Gn_@y{:)6uR qթmU0@~Vtۑ˯gwxe+ OO/x,a9ՙk=]G6*X#J"oҸ~8V-P#hG PFS _@.q3s?y^9somJq H:aWb2^wVB[|Z\M>_j.v+}~- ss&MWgK`^ OqfWi;JO05Ɯɂ=+E>]۞_u9Ad xs4Uu~i.y3Q G|NVތ=_kti ^RtTL[qGV9귪O#Vib<ތ{U볊^[VÒ+9m \|[T*7m|FUvÿ>7@|n`9Y-Z팙LQls/nm tkiMyf*-8&I(E##=o@~y'6Ř vz9kSu6=7gz)4uE U#ٛ=KOUy~ @痲|؍LӀx* Aݧ!ß'<Ly޶,Q\fך"K}z~|j'ui[xX|,''MLt<.tv]Aߗ9aߛ$qТY^:Ra(kE,7 mrc7CYs0ccu7ۡ6iiY5/>oËksq%>WoϏ?\_|0>"Ͷ;s}Zkv1:G#,o7G|ny#2ϽQ'םy>̪:sY5۪.'cZngM1:48ޙ*|-{7{gqSPŭOv&czÖ{]Wj =i7/i'ϡݵ;7YCgq+<'pSe/z~H[i!ERQx15 $?[jRtv+ 7fqݧf߁t?蹝/V-n^>Jg8Y=g5meK2gvyuA|DŽ$ D;5tV7kyPs;4'8]S0}G*3J2qnэ~q1ε mvCBϗPyROUwnt(2(-RJIrlAH`㕓oy4' ۄ˒vߢxp4'f(iKynt\ <#&WE8(MS7<yަoլb<lܮ?%KNyly~;ff禁)ύ@۾#ϻrNrs,)]$su<ly>,YfLTj_y^mZkK3^orQN8T˞iϏ<7Iyn}\e9u znAN=⚿yR_cĩLs3R})ǥ^zCϑ6(UA?U4Ƥiayq,zA@7Wޯs *wy dX9IO6\++=g]RO6??=5)zQzn}Lc6ƞ Fzn^=Ġ)|DTLw=g42q]:#XI\:Xۨ)'q)7ƞ{2\O&=c칾 zh^|I㧝4 4ZAe:+ĞM&Ugp[D{vM;}b{8 ?Qױ'c+ $- 㞛LMU圓{~37̵+qKINs+֤w2=?5?칓My#j+gҖ]\{k`m1YܕA~nF>wm>׷͜ܞ@6>}M|n_!3}n4|6UX}Fۦe@|NQp">1A|Ͳ\&>߿{}n1 yqڦbs9x;u_vysl|>voy^TU>م볋SxX^_Yn6>~g6.|dX|rG_.Nm'>*WYFS>'|n'Ϗ=,}.>*3v4.FczЖ5 HʢK?{n.4{֞bZm\ʼy^AWzn*=6zNW.5ꚥM/J*C)(zd?9|bL ߗ+Šm/26=~Wwp q kRk.FJu6] rWׯ/=(ϳϖ~]VUgJsǦ$')cAltJt ϧ躕tMCOrx+o?Tg#[X?}zUg^{wWǞWY݇-Tg>W M>^}nZp[]N I\kNyWe7t9sS>]jr *,^@s>׳ϭUՕoکN_[t]+{2ܚ RJ돧61x~1Gjή)]'zN|j3e՟o].>RV2PP#>:ck+X3|~uYZUg.hǚ,Kɱ52tz3-Mz~cu  =?9??I|yŜ|5㟌=YG~yeW0P8wy2Xjno)=|J~X¿R?6z?Y[\R^i:ɬ/Yx*H%߅9rv))Δ"_2;ř@y2\թl1fuJ-o,O# ҔKZNo`p .߿sg*՟+Uj:ҊS)=ﶡNRz,0|-I=yQ{4\(z^VIY{>>>>R2MRһrT*=fy可ܣ-޳|BǗ;9e*üܢ緔si9̿z~=oRsfqK;c!zNâ鹩sۢצJ9?oIѡd)8m&c.i%?rOW’ŘCϗk~^u<"-MKs/y䄞'6 n|q >?Bi_rI.$'G?4X ?^fSc82yƚKh~b1 s[%)tSfy5+ɏl|?\|>.' 
O_BY4[x>6D 緇5o-+ y,=..z>}O6=oRFw=wqyQ彸sq.wKY\p7SEU{G=7{^:ӳ9~󆞷tdrϗ9YuH gES߮ +Wᗞ7/盞ś|>#nϧ9Ϳry˅?s姧ñ|^Bϧ+5!jO[ZS]>͠ > }^s">TV9mՠ>#2hIJGv1[NGR|_ZwOH>Upl8A7Ǐ>s>9[=,pxq|ʫ&Hx֬PΘ͚!g0ǷocC}}ӹ+>z{{?꽂ϝZ?5=`ٷ{Dxa8+p0jf*Cϭ=G굒έ2loMgL ׽&r o??Iʟ[ջ7paN$_\3?oViXПGޜ^/k;k+nz*0ކn[\zsfǯTivMTGoN:›kÑ;yi$* @?X-n*V5^iu|8 nn,:KL!< @9i1Aĺ!t4H>$֬qZ.AYak!݈- uQ~> c/NeaH*w Dto_zUC/}>$GrnčgDغ!v)n(}}[AgM^$ ?nZt Ao\? ])A?%k?t# ͼQϙoA;<}qAsɦ~%B! 2 ?OCι^sZWo#\ r-AoFI=~n,nEЭ O=Vcp<69os~s}篴_n'ތ) AoFIЛK=nK :WŏV%)'%KЛz T $%s/M~ DU󸽾^_IyK+!YկFP*At[0HA/r-ϏM̿$A=Ɵ>,9ߡCЛMЋɣAibnv7|]1שIB]PCzW%BP?M!CzY|̙V`;BeNy*tO(f BWیa5Wc0zOk"+*u߿e!Ps|,9X^!\N~ѠA7|S^wgzoVDL9Sc4M{բY;c׏A$VZʷsZSrնRO^oeSrS(}aqsoNvyslx^ɢ畎A_~ůvϧ-?r /A/ 1xB~?O?q˓ِOAStݼU sHнٕ^ɡs#Zbn {E!!5)ytt!!2""F i7)vA}?%.@)Af{Kk|YɟE lAoE!1{Bi;}JkKW|S+gLt ޽VځTM5D?]u/1 GO tJ?Ƈֿ ɽsǛs:}ޤcKIu׆y!Ы}Bzhtj, u=?K;UoyqͭNzҧ`0dZw&?w'swn KZ&Oyx<`۟gw2ܣ_Nb]ZQ2Kj ?oCW9zzs&@Nz0];jW?hfoWfysօ/=:u$cZ O+Ȭϝ}:\*PJwG,"ܺ 越xi-p= ~xkωH~4CSD>D^O_>@wl=hݗ@xeW6:}lÙ7Uu65i}kNɕetb].?O!&>~2w|s- ),Q}Zt Ϭ:8^Q@:}+|[>s+ۚ);>~~m~*-@w*@w@xnzuRWog-iP@|4ݕ "[5XXBK:@,X.6uF*ЍUW6)SVy7]ֻLSN> t=*ޝ*JF"obM;!>sZ-1}x}-dN9Ӈu$SNDcE膰7[-7Q:%?X7. O.t7+1~x}A1D_>@621.-[l]s2 ׻_U5_g"01}QFyqBeΦOyuTv5Q^?/|њy+CL:㯁[qok]54\Z4sVʠ1nW1^4L-*S'mS_\k[gJ  hSنih' zDӎ dkWmsc޶i7U#gzi=>MO_ϴ-є??z.}h),>%2 CgUW sC'_)`ǭ~;dҙx0܎!2ayْ*^)/CwSQ%0a ih}5"0cO{5;%M9{I u/ݖNf}0a^yzCwsW~UhvW>i43Ku~}vUw? Y6q_ }QczUд+VG/[M4'B7sVBPZmSO!fzhVz*dN)bxeb$B_}>&Nv!tL!t6"tBF.]l]u?Ǚ{ۻ oSǿ_? LOr_}㿎?WI{oJKw3DZ}Q7.3s@w&ckW'{dK.!t~WH;-AaCYuQ>Kwqާ]MPNjƐϿZG}Ӯo ^m-RUѾE.vN;Zk#/*Ќˡ]>ѻ:Imaq/'߅ЮV7ƪ}H~{9?w t(W$2JK>w֬_{W?.~vn6v_xz_R}zcGjk؈Llg7{w!*~W;s&}ѻٯ~R0]jhNOp0zgm\ލDSpvcJa_~7-~g.bSx6Ͼ=Y_?ZF7U] NA9i{G+vewQtQАnry&ۿ+yg7[^tSПU!=]i/;6D{]F?̼Т̻/ȼ8%d?¼wr;qu .k}Cͻ}1"03]ze'V^#* ޻tbHyF+P9>tW:rhq-߻Jw)_I^ף@:!Ǒw`/TTC^w#! kǏ6|C=>>9W9v|t)n̥-ۈw=/IE|w1ֈAxG!zݟ}#'Wpnhnku vcg#]K?R㰇Hxwh"f!^.ƇU#]PxMG{wkuڌT ;Lؗ1+]TƆ}UwAw}c;!Î|W$ŕXqHuQθ|rP!y%&{ QP 6~؇Yf6D x8ߵƾK}ֽ_dw'1Se$aԨ! z"m4=Gt nbaw&!Zi󝧀ߕQ](훃¦TbttḻDaQ1K Nj w~4w7>{qݚO k0]`nYS3e,8 ]V ϟpoAۣBϮKHR=N{n]Pv_pNv'(v~rD+]b-Z{ͲNAݔw {3v+F{tWhfvS9jltb+ɇ90wݯӱ-FIe1ò, ws"x{''5hndU0P;b- ;MX$0&¬W{oXVz|tcLa^  |şqQ|] }ݗ] AG{Hm\71[ xݗ{v݇|q]ȺgDe^ 3`Hڼ9:;Wロ]Z{ i=N2|YI5̺*WY{b>9kXHjJ<лr4q[[ڭьV+Nꮜ doA4_]E4k`2{2u=NAĩ[JoߺWR}!u(ɜwet(. 
舡TP(C4[ǚ&ֿ|=m<v[2:K;0mw2q$Ajzxy~ra;=b/{v]=?=wkcP%y#yx/ߍW;;CX:|wꗫ?B箦sʇ9-U˹ҁ_*ƹ _B'¹?w7o欅7Et[ankln`\s,aͮ Q〚]4n^ \)G ffv ܗq/ =h1ׄ(y&%1xn4Z({O v2NQܽ3UwiH9i(.u iGmy ,BaNA:Mo{Oj@q\'~*fߢZvEqo:?ڨ XaGe{Eq`o]ǭ}ÕްߎzCFoMB~T̾wt xߚ{ovoC}06  .YIQ ⛥h|8{G3`~Gs :?:d3k`ovU`Ս 2;b-F.J(%G4b0kxɜ q3}|-;cO*R.n$7W0JQEΏNZợ)ME )33t⠳z }3$P2U)_~]bZtZ.ΈNwhN1XW^;ҡ#!u fB* #t]@Z 8Eq@t{!.7Eq'( _x\4I 6M=h5E!Mu#rE:vzrݛ庣uuMu:S4߻0LQ\=z3gq(W(Nk n_(\@Ű\aJC4}+o{W(sKm}V/v*v;HvmeeCnaoJWvۺ:?:Afm-5 P[$C|Kpv*rOw.Mj[[nRkfWbқegu>ֻd?ќ!Z|T PMtMA6W:f۟]wmi{w-}|w/+BB6E$ߦZR{27ʹΓY⋠Dhώf Eq:?Ukh{QN@n!"g{WN+Rl(Eq 11Af;GY8e0\ZֽcXY%X;PVqݨ0I&Jv/zHF5.~(>T`)fWxm/c:)>?:&8]Kll/y['Xq^z k(._غ) LY,zUrza ;l ;nm[7.1I: 0X- +[Y *XKu@Z}徘ֳʫ^_jZyTԗW-9Djcr9\Rj_{ ҥ⫙ZW].񪹣j1`1g}RW.e{U ëJƫg]ƫJUmujݸل>LXNjy4V« 5Ks@xUq:r&i4KsX5؄}4_*: sU-īWF^lxU#/=aַY[\Z׵7VU-\emǫjjW}$\k+|ѢxrU1[:yīJ,iU;:x:DUWe}?^u\ ܚ99UUO&z{9!xPcEY;By'(^VUƭQ</ jK}|2^ٶWMhj]񪞮BZeU=ū:3ҫ>xUfxթe^\H=Rf*CQEzzնEjUWL}p8۫;W kmm^ճ ^T)WuWW/^u\Ksƺ4Y.ӫޛƫ^ūmHVЛM׫"CovOnxUj񪽂uziW}Y^Ոx'׫N?xCPSƙ՜߻=6EK(x:U~rP47Lǫzj c^rJHPS<ҁUQ^(]Y&0bb47zGnW5r=^%RUuƫ PM{`ч|u@j]D޺īW^uqW[xU^&bJU7"~௚?z*p'z47vQ$ߥiUqzաԫ j:3*t'gOLVūV)^4w/ǫVWū&ti.۵(;&W-Nr4w+RUIW*KsV}īdIzgdi@x;>M5^lxnfn-ìgjGxrKWͷ|5zUxW˃90J6~:.ƫbW5/xUβRNxU 9^UڻƫWH>^utxQ/=WBUYsӫ*Wj!4^ c/t?^Փ[j#WM"^uz}lޢ&޺Ho(^U!R^576xU xUoNU?QmZM =Q1u: ë zgފ{FUǫzW-:jo:?WƫWӏWuwW٧zUoU07^8gxU?xhgD*tatݟMg9mP2ə=>yEЫmWhx{\UYUӫ= ǫňW9^uܒۛz@^WtiNmoW{Vz;tf?A^4"{N }lh.wQ$),$9^g ?zURo~>^ս'jL=AT6F^:fN~%Pm8NӃ{/͜.sj׫V!_Eΰ_{4'Jķx|FxU:U^W2ǫ/l(|[Ϧ3-n]!^ UW:EzyLNANZ[/R> JUj8^FXxեzf>Ų&P ?OYڍ;X4s)_N^#gl 4=HVzUgWj^>^^*j^:RjO69]:.dz&TZ}s.V/|5^\|Nb9oͲRtxϦ(HxRU1W:H񪕍]34Eq>+^>ɜbj/6PEjNA"zyscAo<(Ӂ=JP=J$BwƏ s}{5fUkjYW"5n*sWG+]s+zU5Uǡ\rWӫV4J(것ŰJ}t?jE}k«vU:ӈBWHiU5o^ltCj^v zϦ([x2>xUKԫVt!Cܚ =^U xX䫹nH}ASEq;,#rnTfN7}rJxU{/l"x>7PxUVնWfκW}| '%^o[(RH>6c<WT9([s{t9Uǡիf%/Т[٧ëZnjXVj!*'d%]f"? 7!Z.qz)+Z^iͬnD|q!AMVol9a(UzӃ *iϷ^՜EqW%8Eq]1K%xn?x~kQ\WTpWv|0ZW ի;SWRJn>ݒfYpzU+%q .ٺ'RajU0[wkUOU$xW&cI-#%UWԓ]vSfNxU(Uz{tf;5WS9K&+d9: U@5(8T,g'f]o^ǫN6h. 
(x@ONQ\Ԝ*uNxպU+?W>^u)HJQOwT ^aCԔ 7a*Rr0UOLq.-Sy՜TUtfU׫1~z7U ƫ>ZwJPsԬ̰&ġ}*Lz8\>zރxպ5WH^~U4zUvdʫBj}xw8UBqOZ84j*RqUوcy{zwS`{UJ5Z%^^'_:xm?aON)YY*|G]U|5KMG u:La!4ܢVǞlzWUAżzv܂Pԋ;!yr~-U Wux{ՔU>sV^~v&3Er $'~4kUùǫn4]KsCOcZЫ| Ks6_*7WGU~6-E- 3a!yfiNzvCnw4wiN 7&'Ƨ*s0a9䫹[(s|Ui*_ yPPل~V"^J0wUy7n ;Ü#_k+IΏWuEzr ^ÚϮ]ԄW,u_zU!ܩHM-A y^l+[Vry}l%whR xUEj 5Ksfig:47TԹLjELE\ AKGUs0n-JxU^uFjP:7~6vjwYӫ>rݛ+&)Pk0L"boL9}h:~#PW^5KsZU+Aub*[czնj*G=FUG$ΰ_UƣWTW^u(' 0O׫[`'NХ!_MxUo,WEy&A(vz+CU+tp櫺"jW:U/o7U@}$`F^\kJ.g$#"~htf[>uEjNA5͡.GU?D9~o/?9xKsJ9{JJX5T$䫲ƋW>>g`Fa#_z^j:hjէkʪVrԶUo_G3/y=_E^XjJ7zQB_"|[1tj:3 %n^D5|Ɛ]@xU˫WfU/ȇS6x'_b9V2$W-9əU6kI<؄%ei䫶/4^ղU=W_ë^TU$_xxl:?@5琠~tzk'1at's 7L]^4%:UHHgU-Zq]U7Ƚ6hfU-WQjJʫ/ǫ΍[̡2$YVʩZG^zUw5JZjVZj|*r깁j fxpWeʳ*9Ji@5W:cJ+ԫjt5^/]5_JGgUMb*zW'zxѤW×2oƫŕtKrN/6xc/+uzS{nx0LBpi&P@|_Rg|ˬ҇ӟ%'ǫV =^ ^m^p=Z@5}O%I|2|1m\`W]m&`2xJ2m\-RsQWf2şxU3R_jH|ř?^uH,szt׫pdj&h!_%YT67 _6_Wm Wn{>_gUK>F< S9'^IjNV/96(!>HX ifWhUyEzզuxUc^5xU^@x:AWͻWV-^TUzUK^RW"_UW&_UUFS^zUU|2!Jj:ҝ_G 7]^u0zӃhIPGzZ s0VtZ׵+TW5UfW9] 3+z<mUjx{5RW@5}96h>/j&5WEfūK?^5( D |57yUaP͕fNW),7&_Ք[Uz7^JoSxU Cz U4)8}UY^ OUퟀW܂l֛ì>櫪U0kլ^ūg:Ts ⟊WUxU=eW~܋"2fMa0(] *_QU3"_MU5MFS=zEM9&W=43(%PM =6h:Ɯ^ xUWÖ&xU6AV"U]+W欕W&^Փjj{ǫVo .īĉW-P^x̀W=wkYjV^3-|jTg׻x0ҡ'_}j*U+5BUx ^tf>S%Amy ^6V/.Wp^F CdwVÔ}Wz+RxeWiYJӁ[WǯWTU')GNE&c}KxU Uǫ WZxfE[n{񪶝W[z9|9]s:ɇwxHիڐ*^)xU:0U-:^&Y^uhxn5kZ[c2mzw-WuY8^8Ӣ8.->^uW4iQ-s[j$njqiCӢ8ȁ3Jim}ji֓+ݔz r=V/񪶍"_ūZqWu)|U[U \H|U}' ,~UPTU^N|g>9ͼuM)mգfͲzUWʫ֓{iE=x8JSwql,C+_5/!^u4>2UQC WNSn7_>~QV/s ]87}e9prp{@Yī:gǫ=M/^վ#MG]jwjtKzOf5W~ NjtUI}XVZWX)p9z'8ivMUЫWSǁd,:!ĮUOŕeyP*%Y,͚TkW0( 9«vf9L3aM𪷩U]ƫZof||ۗ0P<1UuP=J<Sr9<ī˿Qf&|1#2I(WYtEqmGY_xbU6hhX䫞2RU?UE=䫖8NQ;_ $,9]^0^UAy«>LCNYs 4_fYTSWqٮat 34_ѱkV^HTjnOqk׫W-'{҃(Z|բ'7ۼ-C`/Z[Ojtwk=>5_t^UԬbGs_ZofM^;_5fQuz's˟@ī \C86 q>9׫.F-W|SKb0Ô_zr'_դi*;zUzղoi۞"mdW UիN!Cߢ0Eq׫>UUMadJu ^"5WxSǫ3|wW6^jg׫>Ϧ5/HV@5(!H@!U:լWzhtpEw-4ā#5͚7ƨaڠ={jm"Moxť|c0s铹dU:82xպ!aiSEWe5Bʫī>|U'!ũ˫WEqc$_{׫NZ^UQW;^իiQ\zUU%_UO |xؓ9]zzON*ywo꿧ojxUW5WhʑU R|1W@4yq 474*?LXRa~4G¦^Azsnq4d"ju~j,).[]Dvj6bUMr:,qox m4ل"4^usx?"x~/U"5mx /sU+lwfR? K0^5=WW,3a%FzUUexUzUe :/0_~fi2a3Ksfv3U샇(:^u';[T뜵>W5 jzUv6@Y{$,͙lM:ɭ2ƞM0KsmVrU=Vꐯ>;:*5_Z)JarxU/W}$TCS7KsO0^՛1*zHPC+n5WxU˫b'PyJPJxqYkWSΟ9W5|ն}v ^xUgzUj_k? #nMzUuVr!AjIW-MCc#^uT*hZɑU"r05}ONaa>|:70չW^j=|UC˫-zJP yYEE }f9%0^f"U/DǫUѫWjY5vI9^u0iMS!\yW=IP nmnBMj]{\cP5_rxU;_Μ`+SJjg/>5^*9}}WU>^?[xnz^xU~Eaaݬx|S?^վs۫:̏MSƫj֤qlīVijNxS^)W0v4cebl.,iN3#`w4kt@BjW%34Ks4"kqɤeinP:*Sn1ŭYV*:݅4*lB9h 2Ҝ ͥGȧ[VrKJbI׫Z?S"5MS1}s\ūzu֫W6.Z~_<9^PxU'&;_zUT8 6ի^8݇݁LU5 +p'J|FuRU> *rWPPޟ9^ƫīZZxīV[u퐯j P8x'U f«u^Tj}9?TJGtx0WI*,wUG7"|%_={o^ɜ?|U>ȧ[GzU/jrz)߇W5%4*^)yI).2#RMm{Wwà,(u-HZդEʁA 7:!*=mƂWxUMs鐛!MSuMOʘ;WUmWC^1tطd8~ׯWUIU鍣W&^n{;_RJw}pZ)W=^]ǫ@[Uz Wfo\M|q𪧌BsTu0ZaQzgdHsitCs!\麚yo/Wel9핯|2I#/."5W:T|)RUKf9^UtIXU/ j*_ʟp "QU׫V?7ەk+^`5w2xUW)'_hxPnq8LsAOūL5ktCzHfOwëꐯ&uJ嫴A^OjM';AWh<^uS'ǫ=pjݸU]WoJy.PMSxNwPUzDu«PWed{xUs'?Hū:>DW%lv^Q<ǫWZa֗&u.7cxUjGxUR|JGҥ^P< LxUG!v y(z4tNmpë3WW'^ѺF\_ij76jŞ(%9JSȧU02_i0UmW=H嫖f͕覵u? 
brms/vignettes/brms_distreg.Rmd

---
title: "Estimating Distributional Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Distributional Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

This vignette provides an introduction to how to fit distributional regression models with **brms**. We use the term *distributional model* to refer to a model in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and the corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters and assumed to be constant across observations. This assumption is so common that most researchers applying regression models are (in my experience) often not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increases model complexity and thus makes models harder to fit. Fortunately, **brms** uses **Stan** on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models, so that model complexity is much less of an issue.

Suppose we have a normally distributed response variable. Then, in basic linear regression, we specify a predictor term $\eta_{\mu}$ for the mean parameter $\mu$ of the normal distribution. The second parameter of the normal distribution -- the residual standard deviation $\sigma$ -- is assumed to be constant across observations. We estimate $\sigma$ but do not try to *predict* it. In a distributional model, however, we do exactly this by specifying a predictor term $\eta_{\sigma}$ for $\sigma$ in addition to the predictor term $\eta_{\mu}$. Ignoring group-level effects for the moment, the linear predictor of a parameter $\theta$ for observation $n$ has the form

$$\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}$$

where $x_{\theta i n}$ denotes the value of the $i$th predictor of parameter $\theta$ for observation $n$ and $b_{\theta i}$ is the $i$th regression coefficient of parameter $\theta$. A distributional normal model with response variable $y$ can then be written as

$$y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)$$

We used the exponential function around $\eta_{\sigma}$ to reflect that $\sigma$ constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number.
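As a small illustration of this data-generating process (with arbitrary values chosen purely for this sketch), we could simulate data in which both the mean and the residual standard deviation depend on a single predictor `x`. A model of the form `bf(y ~ x, sigma ~ x)` would then be a natural candidate for such data.

```{r}
# illustrative simulation only; the chosen coefficients are arbitrary
set.seed(1234)
N <- 200
x <- rnorm(N)
eta_mu <- 1 + 2 * x        # linear predictor of the mean
eta_sigma <- -1 + 0.7 * x  # linear predictor of log(sigma)
y <- rnorm(N, mean = eta_mu, sd = exp(eta_sigma))
dat0 <- data.frame(y, x)
plot(x, y)
```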
Then, in basic linear regression, we specify a predictor term $\eta_{\mu}$ for the mean parameter $\mu$ of the normal distribution. The second parameter of the normal distribution -- the residual standard deviation $\sigma$ -- is assumed to be constant across observations. We estimate $\sigma$ but do not try to *predict* it. In a distributional model, however, we do exactly this by specifying a predictor term $\eta_{\sigma}$ for $\sigma$ in addition to the predictor term $\eta_{\mu}$. Ignoring group-level effects for the moment, the linear predictor of a parameter $\theta$ for observation $n$ has the form

$$\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}$$

where $x_{\theta i n}$ denotes the value of the $i$th predictor of parameter $\theta$ for observation $n$ and $b_{\theta i}$ is the $i$th regression coefficient of parameter $\theta$. A distributional normal model with response variable $y$ can then be written as

$$y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)$$

We used the exponential function around $\eta_{\sigma}$ to reflect that $\sigma$ constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number.

## A simple distributional model

Unequal variance models are possibly the simplest, but nevertheless very important, application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. For simplicity, assume that we only investigate the post-treatment values.

```{r}
group <- rep(c("treat", "placebo"), each = 30)
symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1))
dat1 <- data.frame(group, symptom_post)
head(dat1)
```

The following model estimates the effect of `group` on both the mean and the residual standard deviation of the normal response distribution.

```{r, results='hide'}
fit1 <- brm(bf(symptom_post ~ group, sigma ~ group),
            data = dat1, family = gaussian())
```

Useful summary statistics and plots can be obtained via

```{r, results='hide'}
summary(fit1)
plot(fit1, N = 2, ask = FALSE)
plot(conditional_effects(fit1), points = TRUE)
```

The population-level effect `sigma_grouptreat`, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the `conditional_effects` of `group`. Going one step further, we can compute the residual standard deviations on the original scale using the `hypothesis` method.

```{r}
hyp <- c("exp(sigma_Intercept) = 0",
         "exp(sigma_Intercept + sigma_grouptreat) = 0")
hypothesis(fit1, hyp)
```

We may also directly compare them and plot the posterior distribution of their difference.

```{r}
hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)"
(hyp <- hypothesis(fit1, hyp))
plot(hyp, chars = NULL)
```

Indeed, the residual standard deviation of the treatment group seems to be larger than that of the placebo group. Moreover, the magnitude of this difference is pretty similar to what we expected given the values we used to simulate the data.

## Zero-Inflated Models

Another important application of the distributional regression framework is the class of so-called zero-inflated models.
These models are helpful whenever there are more zeros in the response variable than one would naturally expect. For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge number of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: "The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish."

```{r}
zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv")
head(zinb)
```

As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations.

```{r, results='hide'}
fit_zinb1 <- brm(count ~ persons + child + camper,
                 data = zinb, family = zero_inflated_poisson())
```

Again, we summarize the results using the usual methods.

```{r}
summary(fit_zinb1)
plot(conditional_effects(fit_zinb1), ask = FALSE)
```

According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch fewer fish. The zero-inflation probability `zi` is pretty large with a mean of 41%. Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-*inflation*). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here).

Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes, and predicting both parts of the model is natural and often very reasonable to make full use of the data.

```{r, results='hide'}
fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child),
                 data = zinb, family = zero_inflated_poisson())
```

```{r}
summary(fit_zinb2)
plot(conditional_effects(fit_zinb2), ask = FALSE)
```

To transform the linear predictor of `zi` into a probability, **brms** applies the logit-link:

$$\text{logit}(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}$$

The logit-link takes values within $(0, 1)$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors.
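As a quick illustration of this back-transformation (a sketch added here for convenience; it assumes the default **brms** coefficient names `b_zi_Intercept` and `b_zi_child`, which you can verify with `variables(fit_zinb2)`), we can compute the implied zero-inflation probability for, say, a group with two children by applying the inverse logit (`plogis()` in R) to the posterior draws of the linear predictor:

```{r, eval=FALSE}
# Sketch: back-transform the linear predictor of zi to the probability scale.
# The coefficient names below follow brms' default naming scheme (assumption).
draws <- as.data.frame(fit_zinb2)
eta_zi <- draws$b_zi_Intercept + 2 * draws$b_zi_child  # group with two children
zi_prob <- plogis(eta_zi)  # inverse logit
summary(zi_prob)
```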
According to the model, trying to fish with children not only decreases the overall number of fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), most likely because groups with more children are not even trying.

## Additive Distributional Models

In the examples so far, we did not have multilevel data and thus did not fully use the capabilities of the distributional regression framework of **brms**. In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have no or only a very vague idea what the relationship between a predictor and the response looks like. A very flexible approach to tackle this problem is to use splines and let them figure out the form of the relationship. For illustration purposes, we simulate some data with the **mgcv** package, which is also used in **brms** to prepare smooth terms.

```{r}
dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE)
head(dat_smooth[, 1:6])
```

The data contain the predictors `x0` to `x3` as well as the grouping factor `fac` indicating the nested structure of the data. We predict the response variable `y` using smooth terms of `x1` and `x2` and a varying intercept of `fac`. In addition, we assume the residual standard deviation `sigma` to vary by a smoothing term of `x0` and a varying intercept of `fac`.

```{r, results='hide'}
fit_smooth1 <- brm(
  bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)),
  data = dat_smooth, family = gaussian(),
  chains = 2, control = list(adapt_delta = 0.95)
)
```

```{r}
summary(fit_smooth1)
plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE)
```

This model is likely overkill for the data at hand, but it nicely demonstrates the ease with which one can specify complex models with **brms** and fit them using **Stan** on the backend.
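As an optional extra (not part of the original workflow above), the estimated smooth terms themselves -- including the smooth of `x0` on `sigma` -- can be inspected with the `conditional_smooths` method:

```{r, eval=FALSE}
# Plot all smooth terms of the model, including the one on sigma.
plot(conditional_smooths(fit_smooth1), ask = FALSE)
```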
brms/vignettes/citations_overview.bib0000644000175000017500000005770113625764732020011 0ustar nileshnilesh% Encoding: UTF-8 @Article{brms1, author = {Paul-Christian B\"urkner}, title = {{brms}: An {R} Package for Bayesian Multilevel Models using Stan}, journal = {Journal of Statistical Software}, year = {2017}, volume = {80}, number = {1}, pages = {1--28}, encoding = {UTF-8}, doi = {10.18637/jss.v080.i01} } @BOOK{brown2015, title = {Applied Mixed Models in Medicine}, publisher = {John Wiley \& Sons}, year = {2015}, author = {Brown, Helen and Prescott, Robin}, owner = {Paul}, timestamp = {2015.06.19} } @ARTICLE{lunn2000, author = {Lunn, David J and Thomas, Andrew and Best, Nicky and Spiegelhalter, David}, title = {\pkg{WinBUGS} a Bayesian Modelling Framework: Concepts, Structure, and Extensibility}, journal = {Statistics and {C}omputing}, year = {2000}, volume = {10}, pages = {325--337}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.06.18} } @MANUAL{spiegelhalter2003, title = {\pkg{WinBUGS} Version - 1.4 User Manual}, author = {Spiegelhalter, David and Thomas, Andrew and Best, Nicky and Lunn, Dave}, year = {2003}, journal = {MRC Biostatistics Unit, Cambridge}, owner = {Paul}, publisher = {version}, timestamp = {2015.06.18}, url = {http://www.mrc-bsu.cam.ac.uk/bugs} } @MANUAL{spiegelhalter2007, title = {\pkg{OpenBUGS} User Manual, Version 3.0.2}, author = {Spiegelhalter, D and Thomas, A and Best, N and Lunn, D}, year = {2007}, journal = {MRC Biostatistics Unit, Cambridge}, owner = {Paul}, timestamp = {2015.06.18} } @MANUAL{plummer2013, title = {\pkg{JAGS}: Just Another Gibs Sampler}, author = {Plummer, Martyn}, year = {2013}, owner = {Paul}, timestamp = {2015.01.20}, url = {http://mcmc-jags.sourceforge.net/} } @ARTICLE{hadfield2010, author = {Hadfield, Jarrod D}, title = {MCMC Methods for Multi-Response Generalized Linear Mixed Models: the \pkg{MCMCglmm} \proglang{R} Package}, journal = {Journal of Statistical Software}, year = {2010}, volume = {33}, pages = {1--22}, number = {2}, owner = {Paul}, timestamp = {2015.06.18} } @Manual{stan2017, title = {\proglang{Stan}: A \proglang{C++} Library for Probability and Sampling, Version 2.14.0}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/}, } @Article{carpenter2017, author = {Carpenter, B. and Gelman, A. and Hoffman, M. and Lee, D. and Goodrich, B. and Betancourt, M. and Brubaker, M. A. and Guo, J. and Li, P. 
and Ridell, A.}, title = {\proglang{Stan}: A Probabilistic Programming Language}, journal = {Journal of Statistical Software}, year = {2017}, owner = {Paul}, timestamp = {2015.06.19}, } @ARTICLE{metropolis1953, author = {Metropolis, Nicholas and Rosenbluth, Arianna W and Rosenbluth, Marshall N and Teller, Augusta H and Teller, Edward}, title = {Equation of State Calculations by Fast Computing Machines}, journal = {The Journal of Chemical Physics}, year = {1953}, volume = {21}, pages = {1087--1092}, number = {6}, owner = {Paul}, publisher = {AIP Publishing}, timestamp = {2015.06.19} } @ARTICLE{hastings1970, author = {Hastings, W Keith}, title = {Monte Carlo Sampling Methods Using Markov Chains and their Applications}, journal = {Biometrika}, year = {1970}, volume = {57}, pages = {97--109}, number = {1}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.06.19} } @ARTICLE{geman1984, author = {Geman, Stuart and Geman, Donald}, title = {Stochastic Relaxation, Gibbs Distributions, and the Bayesian Restoration of Images}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, year = {1984}, pages = {721--741}, number = {6}, owner = {Paul}, publisher = {IEEE}, timestamp = {2015.06.19} } @ARTICLE{gelfand1990, author = {Gelfand, Alan E and Smith, Adrian FM}, title = {Sampling-Based Approaches to Calculating Marginal Densities}, journal = {Journal of the American Statistical Association}, year = {1990}, volume = {85}, pages = {398--409}, number = {410}, owner = {Paul}, publisher = {Taylor \& Francis Group}, timestamp = {2015.06.19} } @ARTICLE{damien1999, author = {Damien, Paul and Wakefield, Jon and Walker, Stephen}, title = {Gibbs Sampling for Bayesian Non-Conjugate and Hierarchical Models by Using Auxiliary Variables}, journal = {Journal of the Royal Statistical Society B, Statistical Methodology}, year = {1999}, pages = {331--344}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.06.19} } @ARTICLE{neal2003, author = {Neal, Radford M.}, title = {Slice Sampling}, journal = {The Annals of Statistics}, year = {2003}, pages = {705--741}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.06.19} } @InBook{neal2011, chapter = {MCMC Using Hamiltonian Dynamics}, title = {Handbook of Markov Chain Monte Carlo}, publisher = {CRC Press}, year = {2011}, author = {Neal, Radford M}, volume = {2}, owner = {Paul}, timestamp = {2015.06.19}, } @ARTICLE{hoffman2014, author = {Hoffman, Matthew D and Gelman, Andrew}, title = {The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo}, journal = {The Journal of Machine Learning Research}, year = {2014}, volume = {15}, pages = {1593--1623}, number = {1}, owner = {Paul}, publisher = {JMLR. 
org}, timestamp = {2015.06.19} } @BOOK{gelman2014, title = {Bayesian Data Analysis}, publisher = {Taylor \& Francis}, year = {2014}, author = {Gelman, Andrew and Carlin, John B and Stern, Hal S and Rubin, Donald B}, volume = {2}, owner = {Paul}, timestamp = {2015.06.20} } @Manual{stanM2017, title = {\proglang{Stan} Modeling Language: User's Guide and Reference Manual}, author = {{Stan Development Team}}, year = {2017}, owner = {Paul}, timestamp = {2015.06.18}, url = {http://mc-stan.org/manual.html}, } @Article{rigby2005, author = {Rigby, Robert A and Stasinopoulos, D Mikis}, title = {Generalized Additive Models for Location, Scale and Shape}, journal = {Journal of the Royal Statistical Society C (Applied Statistics)}, year = {2005}, volume = {54}, number = {3}, pages = {507--554}, publisher = {Wiley Online Library}, } @Article{lindstrom1990, author = {Lindstrom, Mary J and Bates, Douglas M}, title = {Nonlinear Mixed Effects Models for Repeated Measures Data}, journal = {Biometrics}, year = {1990}, pages = {673--687}, publisher = {JSTOR}, } @Article{wood2004, author = {Wood, Simon N}, title = {Stable and Efficient Multiple Smoothing Parameter Estimation for Generalized Additive Models}, journal = {Journal of the American Statistical Association}, year = {2004}, volume = {99}, number = {467}, pages = {673--686}, publisher = {Taylor \& Francis}, } @Article{rasmussen2006, author = {Rasmussen, Carl Edward and Williams, C. K. I.}, title = {Gaussian processes for machine learning}, year = {2006}, publisher = {Massachusetts Institute of Technology}, } @Article{betancourt2014, author = {Betancourt, MJ and Byrne, Simon and Livingstone, Samuel and Girolami, Mark}, title = {The Geometric Foundations of Hamiltonian Monte Carlo}, journal = {arXiv preprint arXiv:1410.5110}, year = {2014}, } @Article{betancourt2017, author = {Michael Betancourt}, title = {A Conceptual Introduction to Hamiltonian Monte Carlo}, journal = {arXiv preprint}, year = {2017}, url = {https://arxiv.org/pdf/1701.02434.pdf}, } @Manual{rstanarm2017, title = {rstanarm: {Bayesian} applied regression modeling via {Stan}.}, author = {{Stan Development Team}}, year = {2017}, note = {R package version 2.17.2}, url = {http://mc-stan.org/}, } @InProceedings{williams1996, author = {Williams, Christopher KI and Rasmussen, Carl Edward}, title = {Gaussian processes for regression}, booktitle = {Advances in neural information processing systems}, year = {1996}, pages = {514--520}, } @Article{westfall2016, author = {Westfall, Jacob and Yarkoni, Tal}, title = {Statistically Controlling for Confounding Constructs is Harder than You Think}, journal = {PloS one}, year = {2016}, volume = {11}, number = {3}, pages = {e0152719}, publisher = {Public Library of Science}, } @BOOK{demidenko2013, title = {Mixed Models: Theory and Applications with \proglang{R}}, publisher = {John Wiley \& Sons}, year = {2013}, author = {Demidenko, Eugene}, owner = {Paul}, timestamp = {2015.06.19} } @Book{pinheiro2006, title = {Mixed-Effects Models in \proglang{S} and \proglang{S-PLUS}}, publisher = {Springer-Verlage Science \& Business Media}, year = {2006}, author = {Pinheiro, Jose and Bates, Douglas}, owner = {Paul}, timestamp = {2015.06.19}, } @MANUAL{Rcore2015, title = {\proglang{R}: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {http://www.R-project.org/} } @ARTICLE{bates2015, 
author = {Douglas Bates and Martin M{\"a}chler and Ben Bolker and Steve Walker}, title = {Fitting Linear Mixed-Effects Models Using \pkg{lme4}}, journal = {Journal of Statistical Software}, year = {2015}, volume = {67}, pages = {1--48}, number = {1}, owner = {Paul}, timestamp = {2015.11.13} } @ARTICLE{mcgilchrist1991, author = {McGilchrist, CA and Aisbett, CW}, title = {Regression with Frailty in Survival Analysis}, journal = {Biometrics}, year = {1991}, pages = {461--466}, owner = {Paul}, publisher = {JSTOR}, timestamp = {2015.08.15} } @ARTICLE{ezzet1991, author = {Ezzet, Farkad and Whitehead, John}, title = {A Random Effects Model for Ordinal Responses from a Crossover Trial}, journal = {Statistics in Medicine}, year = {1991}, volume = {10}, pages = {901--907}, number = {6}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.09.03} } @Book{gelmanMLM2006, title = {Data Analysis Using Regression and Multilevel/Hierarchical Models}, publisher = {Cambridge University Press}, year = {2006}, author = {Gelman, Andrew and Hill, Jennifer}, owner = {Paul}, timestamp = {2016.02.21}, } @Book{fox2011, title = {An R companion to Applied Regression, Second Edition}, publisher = {Sage}, year = {2011}, author = {Fox, John and Weisberg, Sanford}, } @ARTICLE{lewandowski2009, author = {Lewandowski, Daniel and Kurowicka, Dorota and Joe, Harry}, title = {Generating Random Correlation Matrices Based on Vines and Extended Onion Method}, journal = {Journal of Multivariate Analysis}, year = {2009}, volume = {100}, pages = {1989--2001}, number = {9}, owner = {Paul}, publisher = {Elsevier}, timestamp = {2015.07.23} } @ARTICLE{juarez2010, author = {Ju{\'a}rez, Miguel A and Steel, Mark FJ}, title = {Model-Based Clustering of Non-Gaussian Panel Data Based on Skew-t Distributions}, journal = {Journal of Business \& Economic Statistics}, year = {2010}, volume = {28}, pages = {52--66}, number = {1}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.11.06} } @ARTICLE{creutz1988, author = {Creutz, Michael}, title = {Global Monte Carlo Algorithms for Many-Fermion Systems}, journal = {Physical Review D}, year = {1988}, volume = {38}, pages = {1228}, number = {4}, owner = {Paul}, publisher = {APS}, timestamp = {2015.08.10} } @BOOK{griewank2008, title = {Evaluating Derivatives: Principles and Techniques of Algorithmic Differentiation}, publisher = {Siam}, year = {2008}, author = {Griewank, Andreas and Walther, Andrea}, owner = {Paul}, timestamp = {2015.08.10} } @ARTICLE{watanabe2010, author = {Watanabe, Sumio}, title = {Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular Learning Theory}, journal = {The Journal of Machine Learning Research}, year = {2010}, volume = {11}, pages = {3571--3594}, owner = {Paul}, publisher = {JMLR. 
org}, timestamp = {2015.08.10} } @TECHREPORT{gelfand1992, author = {Gelfand, Alan E and Dey, Dipak K and Chang, Hong}, title = {Model Determination Using Predictive Distributions with Implementation via Sampling-Based Methods}, institution = {DTIC Document}, year = {1992}, owner = {Paul}, timestamp = {2015.08.17} } @ARTICLE{ionides2008, author = {Ionides, Edward L}, title = {Truncated Importance Sampling}, journal = {Journal of Computational and Graphical Statistics}, year = {2008}, volume = {17}, pages = {295--311}, number = {2}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.08.17} } @ARTICLE{vehtari2015, author = {Aki Vehtari and Andrew Gelman and Jonah Gabry}, title = {Efficient Implementation of Leave-One-Out Cross-Validation and WAIC for Evaluating Fitted Bayesian Models}, journal = {Unpublished manuscript}, year = {2015}, pages = {1--22}, owner = {Paul}, timestamp = {2015.08.26}, url = {http://www.stat.columbia.edu/~gelman/research/unpublished/loo_stan.pdf} } @ARTICLE{vanderlinde2005, author = {van der Linde, Angelika}, title = {DIC in Variable Selection}, journal = {Statistica Neerlandica}, year = {2005}, volume = {59}, pages = {45--56}, number = {1}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.08.10} } @Manual{loo2016, title = {\pkg{loo}: {E}fficient Leave-One-Out Cross-Validation and {WAIC} for {B}ayesian Models.}, author = {Aki Vehtari and Andrew Gelman and Jonah Gabry}, year = {2016}, note = {R package version 1.0.0}, url = {https://github.com/stan-dev/loo}, } @MANUAL{Xcode2015, title = {\pkg{Xcode} Software, Version~7}, author = {{Apple Inc.}}, address = {Cupertino, USA}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {https://developer.apple.com/xcode/} } @Article{masters1982, author = {Masters, Geoff N}, title = {A {R}asch Model for Partial Credit Scoring}, journal = {Psychometrika}, year = {1982}, volume = {47}, number = {2}, pages = {149--174}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.02.08}, } @ARTICLE{tutz1990, author = {Tutz, Gerhard}, title = {Sequential Item Response Models with an Ordered Response}, journal = {British Journal of Mathematical and Statistical Psychology}, year = {1990}, volume = {43}, pages = {39--55}, number = {1}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.02.01} } @ARTICLE{yee2010, author = {Yee, Thomas W}, title = {The \pkg{VGAM} Package for Categorical Data Analysis}, journal = {Journal of Statistical Software}, year = {2010}, volume = {32}, pages = {1--34}, number = {10}, owner = {Paul}, timestamp = {2015.09.04} } @ARTICLE{andrich1978b, author = {Andrich, David}, title = {Application of a Psychometric Rating Model to Ordered Categories which are Scored with Successive Integers}, journal = {Applied Psychological Measurement}, year = {1978}, volume = {2}, pages = {581--594}, number = {4}, owner = {Paul}, publisher = {Sage Publications}, timestamp = {2015.01.27} } @ARTICLE{andersen1977, author = {Andersen, Erling B}, title = {Sufficient Statistics and Latent Trait Models}, journal = {Psychometrika}, year = {1977}, volume = {42}, pages = {69--81}, number = {1}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.01.27} } @ARTICLE{vanderark2001, author = {Van Der Ark, L Andries}, title = {Relationships and Properties of Polytomous Item Response Theory Models}, journal = {Applied Psychological Measurement}, year = {2001}, volume = {25}, pages = {273--282}, number = {3}, owner = {Paul}, publisher = {Sage Publications}, timestamp = {2015.01.26} 
} @Book{tutz2000, title = {Die {A}nalyse {K}ategorialer {D}aten: {A}nwendungsorientierte {E}inf{\"u}hrung in {L}ogit-{M}odellierung und {K}ategoriale {R}egression}, publisher = {Oldenbourg Verlag}, year = {2000}, author = {Tutz, Gerhard}, owner = {Paul}, timestamp = {2015.01.23}, } @MANUAL{rstanarm2016, title = {rstanarm: Bayesian Applied Regression Modeling via \pkg{Stan}}, author = {Jonah Gabry and Ben Goodrich}, year = {2016}, note = {R package version 2.9.0-3}, owner = {Paul}, timestamp = {2016.03.04}, url = {https://CRAN.R-project.org/package=rstanarm} } @MANUAL{mcelreath2016, title = {rethinking: Statistical Rethinking Course and Book Package}, author = {Richard McElreath}, year = {2016}, note = {R package version 1.58}, owner = {Paul}, timestamp = {2016.03.04}, url = {https://github.com/rmcelreath/rethinking} } @MANUAL{nlme2016, title = {\pkg{nlme}: Linear and Nonlinear Mixed Effects Models}, author = {Jose Pinheiro and Douglas Bates and Saikat DebRoy and Deepayan Sarkar and {R Core Team}}, year = {2016}, note = {R package version 3.1-124}, owner = {Paul}, timestamp = {2016.03.06}, url = {http://CRAN.R-project.org/package=nlme} } @BOOK{hastie1990, title = {Generalized Additive Models}, publisher = {CRC Press}, year = {1990}, author = {Hastie, Trevor J and Tibshirani, Robert J}, volume = {43}, owner = {Paul}, timestamp = {2015.09.07} } @Article{wood2011, author = {Wood, Simon N}, title = {Fast Stable Restricted Maximum Likelihood and Marginal Likelihood Estimation of Semiparametric Generalized Linear Models}, journal = {Journal of the Royal Statistical Society: Series B (Statistical Methodology)}, year = {2011}, volume = {73}, number = {1}, pages = {3--36}, publisher = {Wiley Online Library}, } @BOOK{zuur2014, title = {A beginner's Guide to Generalized Additive Models with \proglang{R}}, publisher = {Highland Statistics Limited}, year = {2014}, author = {Zuur, Alain F}, owner = {Paul}, timestamp = {2016.03.04} } @ARTICLE{chung2013, author = {Yeojin Chung and Sophia Rabe-Hesketh and Vincent Dorie and Andrew Gelman and Jingchen Liu}, title = {A nondegenerate penalized likelihood estimator for variance parameters in multilevel models}, journal = {Psychometrika}, year = {2013}, volume = {78}, pages = {685--709}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2016.02.22}, url = {http://gllamm.org/} } @ARTICLE{duane1987, author = {Duane, Simon and Kennedy, Anthony D and Pendleton, Brian J and Roweth, Duncan}, title = {Hybrid Monte Carlo}, journal = {Physics Letters B}, year = {1987}, volume = {195}, pages = {216--222}, number = {2}, owner = {Paul}, publisher = {Elsevier}, timestamp = {2015.06.19} } @ARTICLE{natarajan2000, author = {Natarajan, Ranjini and Kass, Robert E}, title = {Reference Bayesian Methods for Generalized Linear Mixed Models}, journal = {Journal of the American Statistical Association}, year = {2000}, volume = {95}, pages = {227--237}, number = {449}, owner = {Paul}, publisher = {Taylor \& Francis}, timestamp = {2015.07.23} } @ARTICLE{kass2006, author = {Kass, Robert E and Natarajan, Ranjini}, title = {A Default Conjugate Prior for Variance Components in Generalized Linear Mixed Models (Comment on Article by Browne and Draper)}, journal = {Bayesian Analysis}, year = {2006}, volume = {1}, pages = {535--542}, number = {3}, owner = {Paul}, publisher = {International Society for Bayesian Analysis}, timestamp = {2015.07.23} } @ARTICLE{plummer2008, author = {Plummer, Martyn}, title = {Penalized Loss Functions for Bayesian Model Comparison}, journal = 
{Biostatistics}, year = {2008}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.08.10} } @ARTICLE{spiegelhalter2002, author = {Spiegelhalter, David J and Best, Nicola G and Carlin, Bradley P and Van Der Linde, Angelika}, title = {Bayesian Measures of Model Complexity and Fit}, journal = {Journal of the Royal Statistical Society B, Statistical Methodology}, year = {2002}, volume = {64}, pages = {583--639}, number = {4}, owner = {Paul}, publisher = {Wiley Online Library}, timestamp = {2015.09.02} } @MANUAL{Rtools2015, title = {\pkg{Rtools} Software, Version~3.3}, author = {{R Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2015}, owner = {Paul}, timestamp = {2015.01.20}, url = {https://cran.r-project.org/bin/windows/Rtools/} } @Manual{afex2015, title = {\pkg{afex}: Analysis of Factorial Experiments}, author = {Henrik Singmann and Ben Bolker and Jake Westfall}, year = {2015}, note = {R package version 0.15-2}, owner = {Paul}, timestamp = {2016.02.13}, url = {https://CRAN.R-project.org/package=afex}, } @INPROCEEDINGS{carvalho2009, author = {Carvalho, Carlos M and Polson, Nicholas G and Scott, James G}, title = {Handling Sparsity via the Horseshoe}, booktitle = {International Conference on Artificial Intelligence and Statistics}, year = {2009}, pages = {73--80}, owner = {Paul}, timestamp = {2015.11.09} } @ARTICLE{carvalho2010, author = {Carvalho, Carlos M and Polson, Nicholas G and Scott, James G}, title = {The Horseshoe Estimator for Sparse Signals}, journal = {Biometrika}, year = {2010}, pages = {1--16}, owner = {Paul}, publisher = {Biometrika Trust}, timestamp = {2015.11.09} } @ARTICLE{gelman2006, author = {Gelman, Andrew}, title = {Prior Distributions for Variance Parameters in Hierarchical Models}, journal = {Bayesian Analysis}, year = {2006}, volume = {1}, pages = {515--534}, number = {3}, owner = {Paul}, publisher = {International Society for Bayesian Analysis}, timestamp = {2015.07.15} } @Article{gelman1992, author = {Gelman, Andrew and Rubin, Donald B}, title = {Inference from Iterative Simulation Using Multiple Sequences}, journal = {Statistical Science}, year = {1992}, pages = {457--472}, publisher = {JSTOR}, } @MANUAL{gabry2015, title = {\pkg{shinystan}: Interactive Visual and Numerical Diagnostics and Posterior Analysis for Bayesian Models}, author = {Jonah Gabry}, year = {2015}, note = {\proglang{R}~Package Version~2.0.0}, owner = {Paul}, timestamp = {2015.08.26}, url = {http://CRAN.R-project.org/package=shinystan} } @ARTICLE{samejima1969, author = {Samejima, Fumiko}, title = {Estimation of Latent Ability Using a Response Pattern of Graded Scores}, journal = {Psychometrika Monograph Supplement}, year = {1969}, owner = {Paul}, timestamp = {2015.01.27} } @MISC{christensen2015, author = {R. H. B. Christensen}, title = {\pkg{ordinal} -- Regression Models for Ordinal Data}, year = {2015}, note = {\proglang{R} package version 2015.6-28. 
http://www.cran.r-project.org/package=ordinal/}, owner = {Paul}, timestamp = {2015.09.04} } @ARTICLE{andrich1978a, author = {Andrich, David}, title = {A Rating Formulation for Ordered Response Categories}, journal = {Psychometrika}, year = {1978}, volume = {43}, pages = {561--573}, number = {4}, owner = {Paul}, publisher = {Springer}, timestamp = {2015.01.27} } @Comment{jabref-meta: databaseType:bibtex;}
brms/vignettes/ppc_mm1.pdf0000644000175000017500000007500113042165067015415 0ustar nileshnilesh
brms/vignettes/brms_customfamilies.Rmd0000644000175000017500000003276514111751670020102 0ustar nileshnilesh---
title: "Define Custom Response Distributions with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Define Custom Response Distributions with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

The **brms** package comes with a lot of built-in response distributions -- usually called *families* in R -- to specify among others linear, count data, survival, response times, or ordinal models (see `help(brmsfamily)` for an overview). Despite supporting over two dozen families, there is still a long list of distributions that are not natively supported. The present vignette will explain how to specify such *custom families* in **brms**. By doing that, users can benefit from the modeling flexibility and post-processing options of **brms** even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this [GitHub repository](https://github.com/paul-buerkner/custom-brms-families).

## A Case Study

As a case study, we will use the `cbpp` data of the **lme4** package, which describes the development of the CBPP disease of cattle in Africa. The data set contains four variables: `period` (the time period), `herd` (a factor identifying the cattle herd), `incidence` (number of new disease cases for a given herd and time period), as well as `size` (the herd size at the beginning of a given time period).

```{r cbpp}
data("cbpp", package = "lme4")
head(cbpp)
```

In a first step, we will be predicting `incidence` using a simple binomial model, which will serve as our baseline model. For observed number of events $y$ (`incidence` in our case) and total number of trials $T$ (`size`), the probability mass function of the binomial distribution is defined as

$$ P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{T - y} $$

where $p$ is the event probability. In the classical binomial model, we will directly predict $p$ on the logit-scale, which means that for each observation $i$ we compute the success probability $p_i$ as

$$ p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$

where $\eta_i$ is the linear predictor term of observation $i$ (see `vignette("brms_overview")` for more details on linear predictors in **brms**). Predicting `incidence` by `period` and a varying intercept of `herd` is straightforward in **brms**:

```{r fit1, results='hide'}
fit1 <- brm(incidence | trials(size) ~ period + (1|herd),
            data = cbpp, family = binomial())
```

In the summary output, we see that the incidence probability varies substantially over herds, but decreases over time, as indicated by the negative coefficients of `period`.

```{r fit1_summary}
summary(fit1)
```

A drawback of the binomial model is that -- after taking into account the linear predictor -- its variance is fixed to $\text{Var}(y_i) = T_i p_i (1 - p_i)$. All variance exceeding this value cannot be taken into account by the model. There are multiple ways of dealing with this so-called *overdispersion*, and the solution described below will serve as an illustrative example of how to define custom families in **brms**.

## The Beta-Binomial Distribution

The *beta-binomial* model is a generalization of the *binomial* model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability $p_i$ directly, but assume it to be beta distributed with hyperparameters $\alpha > 0$ and $\beta > 0$:

$$ p_i \sim \text{Beta}(\alpha_i, \beta_i) $$

The $\alpha$ and $\beta$ parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters $\mu \in [0, 1]$ and $\phi > 0$, which we will call $\text{Beta2}$:

$$ \text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi) $$

The parameters $\mu$ and $\phi$ specify the mean and precision parameter, respectively. By defining

$$ \mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} $$

we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter $\phi$.

## Fitting Custom Family Models

The beta-binomial distribution is not natively supported in **brms** and so we will have to define it ourselves using the `custom_family` function. This function requires the family's name, the names of its parameters (`mu` and `phi` in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. For our beta-binomial example, this results in the following custom family:

```{r beta_binomial2}
beta_binomial2 <- custom_family(
  "beta_binomial2", dpars = c("mu", "phi"),
  links = c("logit", "log"), lb = c(NA, 0),
  type = "int", vars = "vint1[n]"
)
```

The name `vint1` for the variable containing the number of trials is not chosen arbitrarily, as we will see below. Next, we have to provide the relevant **Stan** functions if the distribution is not defined in **Stan** itself. For the `beta_binomial2` distribution, this is straightforward since the ordinary `beta_binomial` distribution is already implemented.

```{r stan_funs}
stan_funs <- "
  real beta_binomial2_lpmf(int y, real mu, real phi, int T) {
    return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi);
  }
  int beta_binomial2_rng(real mu, real phi, int T) {
    return beta_binomial_rng(T, mu * phi, (1 - mu) * phi);
  }
"
```

For the model fitting, we will only need `beta_binomial2_lpmf`, but `beta_binomial2_rng` will come in handy when it comes to post-processing. We define:

```{r stanvars}
stanvars <- stanvar(scode = stan_funs, block = "functions")
```

To provide information about the number of trials (an integer variable), we are going to use the addition argument `vint()`, which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use `vreal()`.
Actually, for this particular example, we could more elegantly apply the addition argument `trials()` instead of `vint()`, as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method.

We now have all components together to fit our custom beta-binomial model:

```{r fit2, results='hide'}
fit2 <- brm(
  incidence | vint(size) ~ period + (1|herd), data = cbpp,
  family = beta_binomial2, stanvars = stanvars
)
```

The summary output reveals that the uncertainty in the coefficients of `period` is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter `phi` in the model. Apart from that, the results look pretty similar.

```{r summary_fit2}
summary(fit2)
```

## Post-Processing Custom Family Models

Some post-processing methods such as `summary` or `plot` work out of the box for custom family models. However, there are three particularly important methods, which require additional input by the user. These are `posterior_epred`, `posterior_predict` and `log_lik`, computing predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant for their own sake, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method `loo`, which in turn requires `log_lik` to be working.

The `log_lik` function of a family should be named `log_lik_` followed by the family name (here, `log_lik_beta_binomial2`) and have the two arguments `i` (indicating observations) and `prep`. You don't have to worry too much about how `prep` is created (if you are interested, check out the `prepare_predictions` function). Instead, all you need to know is that parameters are stored in slot `dpars` and data are stored in slot `data`. Generally, parameters take on the form of an $S \times N$ matrix (with $S =$ number of posterior draws and $N =$ number of observations) if they are predicted (as is `mu` in our example) and a vector of size $N$ if they are not predicted (as is `phi`).

We could define the complete log-likelihood function in R directly, or we can expose the self-defined **Stan** functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon **brms**. For the purpose of the present vignette, we will go with the latter approach.

```{r}
expose_functions(fit2, vectorize = TRUE)
```

and define the required `log_lik` functions with a few lines of code.

```{r log_lik}
log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  beta_binomial2_lpmf(y, mu, phi, trials)
}
```

The `get_dpar` function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit. With that being done, all of the post-processing methods requiring `log_lik` will work as well. For instance, model comparison can simply be performed via

```{r loo}
loo(fit1, fit2)
```

Since larger `ELPD` values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial.
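As an aside, the pure-R route mentioned above is also feasible for this family, because the beta-binomial log-density can be written with base-R functions only. The following sketch (an optional alternative added for illustration; the helper name `dbeta_binomial2` is made up here) could replace the call to the exposed `beta_binomial2_lpmf` inside `log_lik_beta_binomial2`:

```{r, eval=FALSE}
# Sketch of a pure-R beta-binomial log-density:
# log P(y | size, mu, phi) = log C(size, y)
#   + log B(y + mu*phi, size - y + (1 - mu)*phi) - log B(mu*phi, (1 - mu)*phi)
dbeta_binomial2 <- function(y, mu, phi, size, log = FALSE) {
  out <- lchoose(size, y) +
    lbeta(y + mu * phi, size - y + (1 - mu) * phi) -
    lbeta(mu * phi, (1 - mu) * phi)
  if (log) out else exp(out)
}
```

This avoids the need for `expose_functions` at the cost of having to keep the R and Stan implementations in sync.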
Next, we will define the function necessary for the `posterior_predict` method:

```{r posterior_predict}
posterior_predict_beta_binomial2 <- function(i, prep, ...) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  beta_binomial2_rng(mu, phi, trials)
}
```

The `posterior_predict` function looks pretty similar to the corresponding `log_lik` function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed **Stan** function for convenience. Make sure to add a `...` argument to your `posterior_predict` function even if you are not using it, since some families require additional arguments. With `posterior_predict` working, we can, for instance, engage in posterior-predictive checking:

```{r pp_check}
pp_check(fit2)
```

When defining the `posterior_epred` function, you have to keep in mind that it has only a `prep` argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is $\text{E}(y) = \mu T$, the definition of the corresponding `posterior_epred` function is not too complicated, but we need to get the dimensions of parameters and data in line.

```{r posterior_epred}
posterior_epred_beta_binomial2 <- function(prep) {
  mu <- brms::get_dpar(prep, "mu")
  trials <- prep$data$vint1
  trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE)
  mu * trials
}
```

A post-processing method relying directly on `posterior_epred` is `conditional_effects`, which allows us to visualize the effects of predictors.

```{r conditional_effects}
conditional_effects(fit2, conditions = data.frame(size = 1))
```

For ease of interpretation we have set `size` to 1 so that the y-axis of the above plot indicates probabilities.

## Turning a Custom Family into a Native Family

Family functions built natively into **brms** are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on [GitHub](https://github.com/paul-buerkner/brms/issues) so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (`foo` is a placeholder for the family name):

* In `family-lists.R`, add function `.family_foo` which should contain basic information about your family (you will find lots of examples for other families there).
* In `families.R`, add family function `foo` which should be a simple wrapper around `.brmsfamily`.
* In `stan-likelihood.R`, add function `stan_log_lik_foo` which provides the likelihood of the family in Stan language.
* If necessary, add self-defined Stan functions in separate files under `inst/chunks`.
* Add functions `posterior_predict_foo`, `posterior_epred_foo` and `log_lik_foo` to `posterior_predict.R`, `posterior_epred.R` and `log_lik.R`, respectively.
* If necessary, add distribution functions to `distributions.R`.
brms/vignettes/brms_multivariate.Rmd0000644000175000017500000002027114010776135017562 0ustar nileshnilesh---
title: "Estimating Multivariate Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Multivariate Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

In the present vignette, we want to discuss how to specify multivariate multilevel models using **brms**. We call a model *multivariate* if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the `tarsus` length as well as the `back` color of chicks. Half of the brood were put into another `fosternest`, while the other half stayed in the fosternest of their own `dam`. This allows us to separate genetic from environmental factors. Additionally, we have information about the `hatchdate` and `sex` of the chicks (the latter being known for 94\% of the animals).

```{r data}
data("BTdata", package = "MCMCglmm")
head(BTdata)
```

## Basic Multivariate Models

We begin with a relatively simple multivariate normal model.

```{r fit1, message=FALSE, warning=FALSE, results='hide'}
fit1 <- brm(
  mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam),
  data = BTdata, chains = 2, cores = 2
)
```

As can be seen in the model code, we have used `mvbind` notation to tell **brms** that both `tarsus` and `back` are separate response variables. The term `(1|p|fosternest)` indicates a varying intercept over `fosternest`. By writing `|p|` in between we indicate that all varying effects of `fosternest` should be modeled as correlated. This makes sense since we actually have two model parts, one for `tarsus` and one for `back`. The indicator `p` is arbitrary and can be replaced by other symbols that come to mind (for details about the multilevel syntax of **brms**, see `help("brmsformula")` and `vignette("brms_multilevel")`). Similarly, the term `(1|q|dam)` indicates correlated varying effects of the genetic mother of the chicks. Alternatively, we could have also modeled the genetic similarities through pedigrees and corresponding relatedness matrices, but this is not the focus of this vignette (please see `vignette("brms_phylogenetics")`). The model results are readily summarized via

```{r summary1, warning=FALSE}
fit1 <- add_criterion(fit1, "loo")
summary(fit1)
```

The summary output of multivariate models closely resembles that of univariate models, except that the parameters now have the corresponding response variable as prefix. Within dams, tarsus length and back color seem to be negatively correlated, while within fosternests the opposite is true. This indicates differential effects of genetic and environmental factors on these two characteristics.
Further, the small residual correlation `rescor(tarsus, back)` on the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of `fit1`, which we will use for model comparisons. Next, let's take a look at some posterior-predictive checks, which give us a first impression of the model fit. ```{r pp_check1, message=FALSE} pp_check(fit1, resp = "tarsus") pp_check(fit1, resp = "back") ``` This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of `tarsus`. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model and we use a Bayesian generalization of the $R^2$ coefficient. ```{r R2_1} bayes_R2(fit1) ``` Clearly, there is much variation in both animal characteristics that we can not explain, but apparently we can explain more of the variation in tarsus length than in back color. ## More Complex Multivariate Models Now, suppose we only want to control for `sex` in `tarsus` but not in `back` and vice versa for `hatchdate`. Not that this is particular reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables. We can no longer use `mvbind` syntax and so we have to use a more verbose approach: ```{r fit2, message=FALSE, warning=FALSE, results='hide'} bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam)) fit2 <- brm(bf_tarsus + bf_back, data = BTdata, chains = 2, cores = 2) ``` Note that we have literally *added* the two model parts via the `+` operator, which is in this case equivalent to writing `mvbf(bf_tarsus, bf_back)`. See `help("brmsformula")` and `help("mvbrmsformula")` for more details about this syntax. Again, we summarize the model first. ```{r summary2, warning=FALSE} fit2 <- add_criterion(fit2, "loo") summary(fit2) ``` Let's find out, how model fit changed due to excluding certain effects from the initial model: ```{r loo12} loo(fit1, fit2) ``` Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model `sex` and `hatchdate` for both response variables, but there is also no harm in including them (so I would probably just include them). To give you a glimpse of the capabilities of **brms**' multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of `tarsus`, which we will now model by using the `skew_normal` family instead of the `gaussian` family. Since we do not have a multivariate normal (or student-t) model, anymore, estimating residual correlations is no longer possible. We make this explicit using the `set_rescor` function. Further, we investigate if the relationship of `back` and `hatchdate` is really linear as previously assumed by fitting a non-linear spline of `hatchdate`. On top of it, we model separate residual variances of `tarsus` for male and female chicks. 
```{r fit3, message=FALSE, warning=FALSE, results='hide'}
bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) +
  lf(sigma ~ 0 + sex) + skew_normal()
bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) +
  gaussian()

fit3 <- brm(
  bf_tarsus + bf_back + set_rescor(FALSE),
  data = BTdata, chains = 2, cores = 2,
  control = list(adapt_delta = 0.95)
)
```

Again, we summarize the model and look at some posterior-predictive checks.

```{r summary3, warning=FALSE}
fit3 <- add_criterion(fit3, "loo")
summary(fit3)
```

We see that the (log) residual standard deviation of `tarsus` is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. Further, we see from the negative `alpha` (skewness) parameter of `tarsus` that the residuals are indeed slightly left-skewed. Lastly, running

```{r me3}
conditional_effects(fit3, "hatchdate", resp = "back")
```

reveals a non-linear relationship between `hatchdate` and the `back` color, which seems to change in waves over the course of the hatch dates.

There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see `help("brmsformula")` or `vignette("brms_multilevel")`). In fact, nearly all the flexibility of univariate models is retained in multivariate models.

## References

Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. *Journal of Evolutionary Biology*, 20(2), 549-557.

brms/vignettes/kidney_conditional_effects.pdf [binary PDF figure; content omitted]

brms/vignettes/brms_families.Rmd0000644000175000017500000003324414111751670016647 0ustar nileshnilesh
---
title: "Parameterization of Response Distributions in brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Parameterization of Response Distributions in brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see `vignette("brms_overview")`.

## Notation

Throughout this vignette, we denote values of the response variable as $y$, a density function as $f$, and use $\mu$ to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, $\mu$ is not estimated directly but computed as $\mu = g(\eta)$, where $\eta$ is a predictor term (see `help(brmsformula)` for details) and $g$ is the response function (i.e., inverse of the link function).

## Location shift models

The density of the **gaussian** family is given by
$$
f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right)
$$
where $\sigma$ is the residual standard deviation.

The density of the **student** family is given by
$$
f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2}
$$
$\Gamma$ denotes the gamma function and $\nu > 1$ are the degrees of freedom. As $\nu \rightarrow \infty$, the student distribution becomes the gaussian distribution.
The density of the **skew_normal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right) $$ where $\xi$ is the location parameter, $\omega$ is the positive scale parameter, $\alpha$ the skewness parameter, and $\text{erf}$ denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean $\mu$ and standard deviation $\sigma$, $\omega$ and $\xi$ are computed as $$ \omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}} $$ $$ \xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}} $$ If $\alpha = 0$, the skew-normal distribution becomes the gaussian distribution. For location shift models, $y$ can be any real value. ## Binary and count data models The density of the **binomial** family is given by $$ f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y} $$ where $N$ is the number of trials and $y \in \{0, ... , N\}$. When all $N$ are $1$ (i.e., $y \in \{0,1\}$), the **bernoulli** distribution for binary data arises. For $y \in \mathbb{N}_0$, the density of the **poisson** family is given by $$ f(y) = \frac{\mu^{y}}{y!} \exp(-\mu) $$ The density of the **negbinomial** (negative binomial) family is $$ f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi $$ where $\phi$ is a positive precision parameter. For $\phi \rightarrow \infty$, the negative binomial distribution becomes the poisson distribution. The density of the **geometric** family arises if $\phi$ is set to $1$. ## Time-to-event models With time-to-event models we mean all models that are defined on the positive reals only, that is $y \in \mathbb{R}^+$. The density of the **lognormal** family is given by $$ f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right) $$ where $\sigma$ is the residual standard deviation on the log-scale. The density of the **Gamma** family is given by $$ f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right) $$ where $\alpha$ is a positive shape parameter. The density of the **weibull** family is given by $$ f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right) $$ where $\alpha$ is again a positive shape parameter and $s = \mu / \Gamma(1 + 1 / \alpha)$ is the scale parameter to that $\mu$ is the mean of the distribution. The **exponential** family arises if $\alpha$ is set to $1$ for either the gamma or Weibull distribution. The density of the **inverse.gaussian** family is given by $$ f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right) $$ where $\alpha$ is a positive shape parameter. The **cox** family implements Cox proportional hazards model which assumes a hazard function of the form $h(y) = h_0(y) \mu$ with baseline hazard $h_0(y)$ expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by $$ f(y) = h(y) S(y) $$ where $S(y)$ is the survival function implied by $h(y)$. ## Extreme value models Modeling extremes requires special distributions. 
One may use the **weibull** distribution (see above) or the **frechet** distribution with density $$ f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right) $$ where $s = \mu / \Gamma(1 - 1 / \nu)$ is a positive scale parameter and $\nu > 1$ is a shape parameter so that $\mu$ predicts the mean of the Frechet distribution. A generalization of both distributions is the generalized extreme value distribution (family **gen_extreme_value**) with density $$ f(y) = \frac{1}{\sigma} t(y)^{-1 - 1 / \xi} \exp(-t(y)) $$ where $$ t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi} $$ with positive scale parameter $\sigma$ and shape parameter $\xi$. ## Response time models One family that is especially suited to model reaction times is the **exgaussian** ('exponentially modified Gaussian') family. Its density is given by $$ f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right) $$ where $\beta$ is the scale (inverse rate) of the exponential component, $\xi$ is the mean of the Gaussian component, $\sigma$ is the standard deviation of the Gaussian component, and $\text{erfc}$ is the complementary error function. We parameterize $\mu = \xi + \beta$ so that the main predictor term equals the mean of the distribution. Another family well suited for modeling response times is the **shifted_lognormal** distribution. It's density equals that of the **lognormal** distribution except that the whole distribution is shifted to the right by a positive parameter called *ndt* (for consistency with the **wiener** diffusion model explained below). A family concerned with the combined modeling of reaction times and corresponding binary responses is the **wiener** diffusion model. It has four model parameters each with a natural interpretation. The parameter $\alpha > 0$ describes the separation between two boundaries of the diffusion process, $\tau > 0$ describes the non-decision time (e.g., due to image or motor processing), $\beta \in [0, 1]$ describes the initial bias in favor of the upper alternative, and $\delta \in \mathbb{R}$ describes the drift rate to the boundaries (a positive value indicates a drift towards to upper boundary). The density for the reaction time at the upper boundary is given by $$ f(y) = \frac{\alpha}{(y-\tau)^3/2} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right) $$ where $\phi(x)$ denotes the standard normal density function. The density at the lower boundary can be obtained by substituting $1 - \beta$ for $\beta$ and $-\delta$ for $\delta$ in the above equation. In brms the parameters $\alpha$, $\tau$, and $\beta$ are modeled as auxiliary parameters named *bs* ('boundary separation'), *ndt* ('non-decision time'), and *bias* respectively, whereas the drift rate $\delta$ is modeled via the ordinary model formula that is as $\delta = \mu$. ## Quantile regression Quantile regression is implemented via family **asym_laplace** (asymmetric Laplace distribution) with density $$ f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right) $$ where $\rho_p$ is given by $\rho_p(x) = x (p - I_{x < 0})$ and $I_A$ is the indicator function of set $A$. 
The parameter $\sigma$ is a positive scale parameter and $p$ is the *quantile* parameter taking on values in $(0, 1)$. For this distribution, we have $P(Y < g(\eta)) = p$. Thus, quantile regression can be performed by fixing $p$ to the quantile to interest. ## Probability models The density of the **Beta** family for $y \in (0,1)$ is given by $$ f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)} $$ where $B$ is the beta function and $\phi$ is a positive precision parameter. A multivariate generalization of the **Beta** family is the **dirichlet** family with density $$ f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}. $$ The **dirichlet** distribution is only implemented with the multivariate logit link function so that $$ \mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ For reasons of identifiability, $\eta_{1}$ is set to $0$. ## Circular models The density of the **von_mises** family for $y \in (-\pi,\pi)$ is given by $$ f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)} $$ where $I_0$ is the modified Bessel function of order 0 and $\kappa$ is a positive precision parameter. ## Ordinal and categorical models For ordinal and categorical models, $y$ is one of the categories $1, ..., K$. The intercepts of ordinal models are called thresholds and are denoted as $\tau_k$, with $k \in \{1, ..., K-1\}$, whereas $\eta$ does not contain a fixed effects intercept. Note that the applied link functions $h$ are technically distribution functions $\mathbb{R} \rightarrow [0,1]$. The density of the **cumulative** family (implementing the most basic ordinal model) is given by $$ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) $$ The densities of the **sratio** (stopping ratio) and **cratio** (continuation ratio) families are given by $$ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) $$ and $$ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) $$ respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the **acat** (adjacent category) family is given by $$ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} $$ For the logit link, this can be simplified to $$ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} $$ The linear predictor $\eta$ can be generalized to also depend on the category $k$ for a subset of predictors. This leads to category specific effects (for details on how to specify them see `help(brm)`). Note that **cumulative** and **sratio** models use $\tau - \eta$, whereas **cratio** and **acat** use $\eta - \tau$. This is done to ensure that larger values of $\eta$ increase the probability of *higher* response categories. The **categorical** family is currently only implemented with the multivariate logit link function and has density $$ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ Note that $\eta$ does also depend on the category $k$. For reasons of identifiability, $\eta_{1}$ is set to $0$. 
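To make the ordinal and categorical notation above concrete, here is a minimal sketch of how such models are specified in **brms**. This example is not part of the original text; it uses the `inhaler` data shipped with the package, and `cs()` requests the category-specific effects mentioned above (available for `acat`, `cratio`, and `sratio`, but not for `cumulative`):

```{r, eval = FALSE}
library(brms)
data("inhaler", package = "brms")
# basic cumulative (proportional odds) model
fit_cum <- brm(rating ~ treat + period + carry,
               data = inhaler, family = cumulative("logit"))
# adjacent category model with a category-specific effect of treat
fit_acat <- brm(rating ~ period + carry + cs(treat),
                data = inhaler, family = acat("logit"))
```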
A generalization of the **categorical** family to more than one trial is the **multinomial** family with density $$ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} $$ where, for each category, $\mu_{k}$ is estimated via the multivariate logit link function shown above. ## Zero-inflated and hurdle models **Zero-inflated** and **hurdle** families extend existing families by adding special processes for responses that are zero. The density of a **zero-inflated** family is given by $$ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 $$ where $z$ denotes the zero-inflation probability. Currently implemented families are **zero_inflated_poisson**, **zero_inflated_binomial**, **zero_inflated_negbinomial**, and **zero_inflated_beta**. The density of a **hurdle** family is given by $$ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 $$ Currently implemented families are **hurdle_poisson**, **hurdle_negbinomial**, **hurdle_gamma**, and **hurdle_lognormal**. The density of a **zero-one-inflated** family is given by $$ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} $$ where $\alpha$ is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and $\gamma$ is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are **zero_one_inflated_beta**. brms/vignettes/brms_missings.Rmd0000644000175000017500000002476714111751670016725 0ustar nileshnilesh--- title: "Handle Missing Values with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Handle Missing Values with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using **brms**: (1) Impute missing values *before* the model fitting with multiple imputation, and (2) impute missing values on the fly *during* model fitting[^1]. As a simple example, we will use the `nhanes` data set, which contains information on participants' `age`, `bmi` (body mass index), `hyp` (hypertensive), and `chl` (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting `bmi` by `age` and `chl`. 
```{r} data("nhanes", package = "mice") head(nhanes) ``` ## Imputation before model fitting There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but `m` times leading to a total of `m` fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is **mice** (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with **brms**. Here, we apply the default settings of **mice**, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables' characteristics. ```{r} library(mice) imp <- mice(nhanes, m = 5, print = FALSE) ``` Now, we have `m = 5` imputed data sets stored within the `imp` object. In practice, we will likely need more than `5` of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of `100` imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to `m = 5` for the purpose of this vignette. Regardless of the value of `m`, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass `imp` directly. The latter works because **brms** offers special support for data imputed by **mice**. We will go with the latter approach, since it is less typing. Fitting our model of interest with **brms** to the multiple imputed data sets is straightforward. ```{r, results = 'hide', message = FALSE} fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2) ``` The returned fitted model is an ordinary `brmsfit` object containing the posterior draws of all `m` submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all. ```{r} summary(fit_imp1) ``` In the summary output, we notice that some `Rhat` values are higher than $1.1$ indicating possible convergence problems. For models based on multiple imputed data sets, this is often a **false positive**: Chains of different submodels may not overlay each other exactly, since there were fitted to different data. We can see the chains on the right-hand side of ```{r} plot(fit_imp1, variable = "^b", regex = TRUE) ``` Such non-overlaying chains imply high `Rhat` values without there actually being any convergence issue. Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at ```{r} round(fit_imp1$rhats, 2) ``` The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of `age` and `chl`. ```{r} conditional_effects(fit_imp1, "age:chl") ``` To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed, beforehand. 
Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation. ### Compatibility with other multiple imputation packages **brms** offers built-in support for **mice** mainly because I use the latter in some of my own research projects. Nevertheless, `brm_multiple` supports all kinds of multiple imputation packages as it also accepts a *list* of data frames as input for its `data` argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to `brm_multiple`. Most multiple imputation packages have some built-in functionality for this task. When using the **mi** package, for instance, you simply need to call the `mi::complete` function to get the desired output. ## Imputation during model fitting Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. This remains true when imputing missing values with **brms**, but possibly to a somewhat smaller degree. Consider again the `nhanes` data with the goal to predict `bmi` by `age`, and `chl`. Since `age` contains no missing values, we only have to take special care of `bmi` and `chl`. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors. In **brms** we can do this as follows: ```{r, results = 'hide', message = FALSE} bform <- bf(bmi | mi() ~ age * mi(chl)) + bf(chl | mi() ~ age) + set_rescor(FALSE) fit_imp2 <- brm(bform, data = nhanes) ``` The model has become multivariate, as we no longer only predict `bmi` but also `chl` (see `vignette("brms_multivariate")` for details about the multivariate syntax of **brms**). We ensure that missings in both variables will be modeled rather than excluded by adding `| mi()` on the left-hand side of the formulas[^2]. We write `mi(chl)` on the right-hand side of the formula for `bmi` to ensure that the estimated missing values of `chl` will be used in the prediction of `bmi`. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way. ```{r} summary(fit_imp2) conditional_effects(fit_imp2, "age:chl", resp = "bmi") ``` The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the 'one-step' approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the 'one-step' approach is that the model needs to be fitted only once instead of `m` times. Also, within the **brms** framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. On the downside, it is currently not possible to impute discrete variables, because **Stan** (the engine behind **brms**) does not allow estimating discrete parameters. 
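A convenient side effect of the 'one-step' approach is that the imputed values are themselves parameters of the model and can be inspected like any other parameter. The following sketch is not part of the original text; the parameter names shown are typical (e.g., `Ymi_chl[k]` for the k-th missing value of `chl`), but it is best to check `variables(fit_imp2)` for the exact naming:

```{r}
# summarize the posterior of the imputed cholesterol values
ps <- posterior_summary(fit_imp2)
ps[grepl("^Ymi", rownames(ps)), ]
```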
### Combining measurement error and missing values

Missing value terms in **brms** can not only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, `mi` terms are a natural (and somewhat more verbose) generalization of the now soft deprecated `me` terms. Suppose we had measured the variable `chl` with some known error:

```{r}
nhanes$se <- rexp(nrow(nhanes), 2)
```

Then we can go ahead and include this information into the model as follows:

```{r, results = 'hide', message = FALSE, eval = FALSE}
bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi(se) ~ age) + set_rescor(FALSE)
fit_imp3 <- brm(bform, data = nhanes)
```

Summarizing and post-processing the model continues to work as usual.

[^1]: Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and then impute the missings *after* fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the `predict` method.

[^2]: We don't really need this for `bmi`, since `bmi` is not used as a predictor for another variable. Accordingly, we could also -- and equivalently -- impute missing values of `bmi` *after* model fitting by means of posterior prediction.

## References

Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. *Journal of Statistical Software*, 1-68. doi.org/10.18637/jss.v045.i03

Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation. *The American Statistician*, 64(2), 159-163. doi.org/10.1198/tast.2010.09109

brms/vignettes/me_rent1.pdf [binary PDF figure; content omitted]
ؒ_.V[EruUݭYxZEruȺT4ZxEa%7Mwsba6a[]"U&5i.C5M.qdJ0kAS&u|Cq4x(8.A J0I![eSnn/_ ;L&[#()[folxXW*޽g?W.)Xؾ)Wo??~駯8LJ E"ch(>~1vtcoC@ǁ(^v8 =Eɴi}mޞ\'Z@w(3O75]n9h=y=/!b|.|İ"0҇8Tz,|dpv yڻ{kilȵ1W߽ns6uiYwE}ۿߺJ)W x7CnIxLgtn*Ӳdr `|Qur)æIlww `0mq_'ZPR6B< t Q XJ?gp$J4JCnW~i\ͷ_ž#8RgD-F\iy%e]w't-LmBD"s!0 %J+KP[4O`F1 M©mYne.%Y[~s7{X~I)PIF6_[dOl$h?߮ n[YI{j}_eQӧ:fm`1cWSßEԤ[a_.E=r`v-7%KoE7zQ0]2pmM tw}@:^-uq;r;ίӠ rݿ)1 R] 7_&[ 9LΧ 0bݿmr9Ew8Ӂ}1bZ)E)X[gJU}~*(lz]`#]4dBn;~|Ҿ{:gp *]ĢA dܮwn~av+ȺPpCN( J~渂0|͕ȜV$QW{'hKH~ȊU?ĭɾ_z%;˵gC*URO=Xzc(Zz-.!R^?f}j=،>|z2MKog}joN]?Cz[z"ޓ68JzEl蟪^;^gL~r=pm÷ˎ[2?uGͼE3#8>yQ闗^Zxl@G/g<)|?J/08#'?X^}C*.7x{t }'/?JP$7yNX 2 |A?|o3+Xs [f{z6%71 s>^0'n}K.z^ÁUj fژOySSB8Oiendstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 576 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << /GS1 11 0 R /GS257 12 0 R /GS258 13 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 11 0 obj << /Type /ExtGState /CA 1.000 >> endobj 12 0 obj << /Type /ExtGState /ca 0.400 >> endobj 13 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 14 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000004581 00000 n 0000004664 00000 n 0000004816 00000 n 0000004849 00000 n 0000000212 00000 n 0000000292 00000 n 0000007544 00000 n 0000007638 00000 n 0000007737 00000 n 0000007786 00000 n 0000007835 00000 n trailer << /Size 14 /Info 1 0 R /Root 2 0 R >> startxref 7884 %%EOF brms/vignettes/me_zinb1.pdf0000644000175000017500000002005513051356434015564 0ustar nileshnilesh%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170216181050) /ModDate (D:20170216181050) /Title (R Graphics Output) /Producer (R 3.3.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 4096 /Filter /FlateDecode >> stream x[͏G9Iw5D9!:!{꽚]d&vr뮮W}ퟷ?>gK{JXI{[{[nmo}ˣƃ|Bɻ?}-ݿ}~ͭսV澺I+mjRާĤK5iv.٤d@% }0v*5 {[% =ٓ sހ0L^²]hw'"d9]5tGPi5^TZ.'C@P}ړ s@} \BߧK@@YQ+ ګ7Ag]rt~>Cv1/Gٺ#4Ml.**a,dAgmc9bB9cs \t[)Ffk嗧mlفt/Eͽa@ fFHэ5gR}]q UiFS@,')dJiiqdCZ?_+'ջ8iw,G3U [q8E˲[v.x߫%yP CevO5!Om5253MieV,~^]z,e{cXFpdžO5-"P3 x7a"Ƭ@՛Ɓf/eըDY/:WOuo zDg\֫fLqݎR(Ok/e]+7 ~hi!r' ,hxQ=~ڻ<tu~XШ,8:|q+#JX8cT"3?~"Y#{FSvr/RlLJ*|H^maH*%{i(M)K Mc#5b4x"O7&7yhAd0rw6-A݉Ɣ 2W  "밹5,J)"Ğ?(`Bj'H;pFn 47H!9a՚|*Tm+c񟽸Y 6_H m/>Mls_}yK7->n6>H[A3;+H p$UCK@(H5 $5ZK@H-,wt;d؏6|*䇽ҚXpV8WЉ?|i b_9[%O#N(:}IU/ H$>Z3`24!F jQÚaRaad5&|FLCI[$_AWf_<"#x[i1/}/&[QMxL sTiHU/$_uDBS## $'PU;8" W 0RR%L~E\{#+WrzHHu@QL@C$֕ $KF@I6ɖ=hdLubdM)\W ̾^vG2Yɦiy ~dUՐ6c$z%,egW2mϳH(H֭p%V/Q:~OI5o#^GɥtGI2y}M UokB};U-/4x/Gkߗwۯү~1E槾~]y--lۡʭU1dQ 1 ;/x0g1WCL$CҍQJ(u xÅq-#:F;'ř-_S'uJ[A-wCyCzhl%]֊zib73_}ݛw'A\5].,;coSK8JcQݛωG8WV>\A&-z! 
Rw%y.\!7$p&qA $Hu$xyII^& TYOIS8(iXIsE*V 9s&t+XJKsɳi4ٚ@0@V!)m4Ɛ@} >5پCƌ d BnY ߞ,֋)2=&3L@V8,ZȹL z F5T\GC^5r UGaYbx-[cl-rJ9Pu@g%%pIb$Qdw&{A0ՒD"]22xrɍMcQd;ZD8/@*z[$ŁTvNR]R1At - 1ыR49)@+¿ F(>+ ,@ T547J!DDApкh9.bCeTUE5͕J{. *rEdЪ<* @oU*(KJPDv&p{%ږ A*@Rd}g4SK'cVdr 9@db< 02h\7%k[7bCy4-0Aă03A#ι# X^Ϊ||qB.m%Ypѝy^q"rX{f'Le!4e>eew=s>C .a{AբJ>C.G@Vq3\.&-]Bk:L5XĊ[%Ű9 (hL^fLm~eS3Ǵ>bҴ> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font << /F1 10 0 R /F7 11 0 R >> /ExtGState << /GS1 12 0 R /GS257 13 0 R /GS258 14 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /ZapfDingbats >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 12 0 obj << /Type /ExtGState /CA 1.000 >> endobj 13 0 obj << /Type /ExtGState /ca 0.400 >> endobj 14 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 15 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000004460 00000 n 0000004543 00000 n 0000004707 00000 n 0000004740 00000 n 0000000212 00000 n 0000000292 00000 n 0000007435 00000 n 0000007529 00000 n 0000007613 00000 n 0000007712 00000 n 0000007761 00000 n 0000007810 00000 n trailer << /Size 15 /Info 1 0 R /Root 2 0 R >> startxref 7859 %%EOF brms/vignettes/me_loss1.pdf0000644000175000017500000001416013155225616015604 0ustar nileshnilesh%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170910140948) /ModDate (D:20170910140948) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 2231 /Filter /FlateDecode >> stream xXˎ]߯RZ"כֿĀHf, DHjŖ%$RC>6Sϟj"1 C#]ki#_"{K~%Z"F?ڌezۭ'}q]c$Z3+qjCшi6t<Њf4rz*J+צ'(asQu)Qf3V]}V1s,KwE2g)eF*1׀OѷKw@CZR[?9p[ip9-#Ÿ,qb_ BM{?zv;".>(sߞc)hEA W'm`T{8Oy$R'+_|ݪa Ly'x 6:L,gw ӚNk_2\]Vqx.?h"`=DS<_~W ĵxdzb',xWɰɣEכ_5Ϋ. 
b۸bN񔔘?Ά~#-hXї.b-_Tʅi^+_S\!_QLpy<&}|mo~} ,;^gxr~Xa|.coJ<8Og>zKy0k'_=ׯOg#ߡK=9a%CoF+[׭Wy=p=}w{C]/~CO[zumL=Xfz PO]auz>^-(OXrv˛.;:ʣnci+]U~>g7g]vyG}忻@vy y~8#/N^~=B{ih%\ Wc{A6t{7\TmGg~^WGϹSvsfޢc;9,>톶67=}]r{L{4v0^?|/`_Qrt}j9Jj>,G)ɑ|__p ᖑO=?~h4CggV-G9!}`N3]3#vLh\,#+*`ji`^q'F>\ݟ# 1w[/ZJO^]qsnk]|dvke'OI<xK~Iǖ6׫^&<=wYKVuTw|ة?mendstream endobj 3 0 obj << /Type /Pages /Kids [ 7 0 R ] /Count 1 /MediaBox [0 0 288 216] >> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << /GS1 11 0 R /GS257 12 0 R /GS258 13 0 R >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 9 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus ] >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /Name /F7 /BaseFont /Times-Roman /Encoding 9 0 R >> endobj 11 0 obj << /Type /ExtGState /CA 1.000 >> endobj 12 0 obj << /Type /ExtGState /ca 0.400 >> endobj 13 0 obj << /Type /ExtGState /ca 1.000 >> endobj xref 0 14 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000002595 00000 n 0000002678 00000 n 0000002830 00000 n 0000002863 00000 n 0000000212 00000 n 0000000292 00000 n 0000005558 00000 n 0000005652 00000 n 0000005751 00000 n 0000005800 00000 n 0000005849 00000 n trailer << /Size 14 /Info 1 0 R /Root 2 0 R >> startxref 5898 %%EOF brms/vignettes/me_rent3.pdf0000644000175000017500000013306613252451326015602 0ustar nileshnilesh%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20170908174613) /ModDate (D:20170908174613) /Title (R Graphics Output) /Producer (R 3.4.1) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 42286 /Filter /FlateDecode >> stream xKtKr7?Z1EۂȀDH= 2Dk܇%ѧOt[k̈'OۿOQox[VWZ?_k97~~KʘgV~_~'Ͽ\oپ_w5ϪZy_g_H?˯|ߚzh_ySLg_ ~s> ı$~Mq֟M=ׯڿ.\^Nרs>r9wr3Zz9_9QW_k%~ 3c֑1}]戼J ߞsSWn?'p<wfgi #/kwug?~lu=-ۓVwOv-slOݿ&8?"nQ?_]yTnşWk2jyٲ]7!9lf޼;?bf8wu;tiztKvU'zn>X?sLJ[2qe8r~?Mqn^xe30&cb2+2ӷw7ۣGts[B|kn8WS5tsf qBq/MWqkom?Vf'?zܗ6fz>\=r=%`zdiISد 2QbEW$[W4y/ʣzğ%bɣ7Fȏy)+A}:}8F1u{ݜ,WW KH-Od(%Н< /P0ECl(_ StRe:} t52B_PIbxn](WNeR4r=PG4r=Z&.霟G5=O,0еa;v8/BqzϳsݗwuX ~#ʫ=a?c]VjV w]!ͣ/h1+{`0ܙ+i%羴ǤX,uSN|46 ٯ-/f*cy;m@q3.^n ߬pR&{Y\g۷S|ܯ=ngW]K7{Wq=8 =&&C&GMp7ol/8 !l*.sw3M=Z Y yq-h9hcæΖşBT}|[İ#5¦|>-ʂ뾃LokbL3abIFG5ltdbAEGl'&Tt`AZ\`b =Nax“3X:|,11Ȁ^ok;95nAX28?Tg#Z"3 4( tQ}^xeDh~ tfX-5XgspM7^c0,w{k}F@ȶ'=/JGRG2apI4X"myN)z}nȒI_NA+K[ǀ@̀+na\摒'pJMB8 8,OQ Fop(/rd߯ed c*o#'>6Fݏ<%ö=c7e X_cY+Ŵ'C^[fWh;ܲw #Y`iqyGI. H!8Z.^j󈯡?b~|R=JR%}~rCv?!JL 8ʴ_ #MgO<&1vE.8*\HgAʿ[fd h` rdcBݐ^ i\˿GdԮ6]e3k#kq-g- 8W}o ԭ,.e@6u C3~CM6 u&Oĩe#ϿxqS `t0J68l\h,t'\d{~<6wm>d4ot綀p%l #u8b2[) ʐJ% >:LA&;2+1`SY~ VT!A;2~ȏx^dȀ`,Fl? B2 :&2ȰdrFlPf Lud&PUk[u u`l>L" 3)&7]_&C-UC)W(`&c' a2GUdZQ^hld}090H-rə\G3qH :4+"p ɼ഑'lAJXh(9㾄=$@…|b?il ޠu?#hbEb7C`F`Ha ,הw>{TF̯ Z|nI;cyR~?;`D#[Ȅ5Ö׭$ZVJ;_n-@?MJv+!1znSU< 'm'6~ѭB"КB=H= s`zKMT ]="iFBaI[y+D *ttT9=(T^z !! 
M4[y&DCiVUg~G4AjGisU&HCM)9ҏetUiTp4=_ytI?-NH^ v+$~ݖIVK\?3TOvڷ*2~lfd{ Zse O恙 -ou0 04-f׌=[wp<*Y(#,Tq'X Yibs瞩Fzy#gM0 ƨi$؛^X "b;};#l!q$yvq>]Î8qa]faI;T+/CV]|!w: = B* n/0>hkLV4NwVWVM4.Hj"#XhNUU-yp5|-3|{4,UVVoUR{dV*kX-ͪ҇))p CHjl)USa5)Mj s; Vן+7,%K'X5;*~ʮ:}mT<15t<0S˪H5N-&@@A*~TSE5Uܶ\ HYSlFmL0'"h쩥Pki~ ~SN`,PR+t[~AtDp KnKrTNzVtS{>p?2i/88ޘne=C "8'UMZ,ZTv %Û2A֤IdM#ư$$fYSb5{ ZLZT(Z'Zg֢vD&^D\Pk +BJ C"Дm|j\F! -6TFP_&" wT"tQP*fO"A" [؞)-z u& Y'Mؔ^zF@#ĶTDb8+6U$6 x?Kl6d%uO۲[RFl0[A<$."%0[o'C{ Hֿ=Ba$qeYtLb*3'MbElGĦGNhcPs4\_ІUUq=ЛK^׆w:&{~Σ.77|ňކץ-6<(M冗鬽yw\,`9`9,=N<d ; ;aS<Uyz ;/ C$7{;8yu ;gź-opVr>Rsk\:S7A7=x%b*l"dϪeBlBvf!{#2"cϷo3 Ի213سFv=O˨ "FЅxgJ{W"D®zw"aH JD^4ve{hXA HأHxdzA2 򭯷$7hHQ!ЌyoB*!P[9p((G ._!pAHM$T7ok yCD?1#jɠfm>a(9&Oͭ2$ΐ0/׺Wzv $CzcC&.=}͒b.Uo𺳀 ߄]BX cxEZ6r6 #nQ[>](9: %BIE%%@IWANw``S߄+[M6=Q`YL`!E%K l#T$ڮt`p(鐻"`90` f«wO,5{`Pk%h >aI`9g $vXf%Hk=[/=Xz%Ilf۪\+ .I6Ev>Tq>$;@U` d-|kb APucB$IF,߶rΧxgpH2+I@1$֎j:$N؊f$I> p$66QkFHFm6?!56IzFFDGML(F]$˨MLQ{Vu5&$Ȩ$`R!TqfQo1+KF vM1bԡAF1 QuJ:՟JFU?u߁i6biuaO2O;ȨO:>pA8E f(~^%~D{,:C??/ ~(v;`W,Y<*E2؝ ~'`=IA?6P*3D ~5;uG?[s=1%CďL#H2lDISe.?/?L '#,8sIǎn#+%H2U}B@NHZ }N`N?wB庀(1t0E`k*FhkѩE wt(5oWDηI詑)#hjL? E#\PGE{hDIzBͰc(г儢QHUF-c US(h[((O7^c~d`Csk~Ѐ(:2+ o}Ę>葁2DѡVL(_pQUw&(:4ٞ(:ECTh\/Qtd:DSjVEsWE@)h|?Q&%(c 1؁^DѡVq: %#Z K( qs"&ՠ$Zυ]Έ]DϮdC&UkPTs𨣧gS㱡*S㲉K㵉K5DE"\m0O4AT h#Н^!@C ݌5BC[1wJ@tbzcV4U>-CgO#*UN,HQ CI 7C"f?ˉ&]0f9?0F3nQY09NC"gM"ДS|H1 aF!sޮ'ct]$0cqq5Y $HgĞ ep"A"X#$c6ZXG +j7 ɘbr1'/ 1b{l[=Ō0yPFgMپ2f1U(A>e<12'Ę[)|))L& c*L0<0&4+Ǧ1GS'SUgbL51p P6\*F1RbLTNH2s1#I 'c.i29~ǘ NJ1猩^1(ŘcYdiGƌL2fTm1dL71ɘ#Ɯߪ̡Y1Ve\a*1fOɘTVK 1Xbʥyh c ^2fCN2LD2Llp%sp!WU]SNdY 9:,|~ɘSU2e&SZݟ4 `UΔ9_s_Lw0g_~ ϡ{996dx[ŲdL?͹\ n΂7q,9, v1ysshp!o:p&1=sXV,ݸǙtfBPQ8࿌y(?0`<`RdyD[o)ɠTc"=W?da/$.u*O`dYj3s.HDY̭M( YV8WJ}W0xSGх>_Lɒuu%&P@gO ZH:зF/%Y;gPr#Jv6pyPs |@^*`bPp %ǧO T .ʱRM݉%cLvI8cETGȒ*vM= "vK6Sdf9RcÎSoX%uXR[KnKnXr& KU=dɩdɩYԸZ_Jm%MASd:XKNr$K%gKV}{gs%M~{qe`:~w KNu%P3,9,9uYrfgbySɒS[0#,~#&FdYxȒp\]NvUf~q4dG p}W&KjxXR7c=JKĎ[W&b9@쨎3:Ďv14GvvWZ'캧/Dv٣sUdGZ1 vCVdo5\y:q>& hEvΎ8xOCIǶT6r";?1츱b@N+vVanD 9o,:"dy|h CH澞 2M`d4X0`Gd*#z;2 U:4e7 vbp}Q߉9;`GFh ;w숌-?E0"ΑDz!*#a;2bǪ[`GFy 7ֆ|HUl$FLW0\Qv1ܸ:1"z6{58 7Gz'0P85a62Gx9ʹ,d¡`62bWĈs$F\bD}1digɈ512Ĉ:D11⸧:#*g+FT11bDuɋudQň%F*+&:!L8uT5pj<30Zv0.>#Nw%~2EńCLB1RLqb}T/b Lm99L8}#C$&ߺ[Vq1[_x=L Yz]~s&܉qAo5 n[D7+FyO #~ !ȕ-79.~2pnRpE缑afM:S7-=ϴf{ؘ/~#?, sB\߀!4gH߈M0qN`^~ =k<870'N4DM(s&g&o@A e)o&V9CF9]7D؈ aVX$mz% vv#/M7bzwJ&w, OǞMh-gT/`F 2\O QlɆ ;t#E  P(ӵWPpjD[8E1`wS~KQx'4zgZ9mZ)wmHK~?K(n .Ҷ#90Då#ሂK+Rx(hUoy$%jMofrǥ;Kc WahG N! z*_DťrJ*+ˁKIF@ T\^@T\Dŕ40ZyHHhڕt Wy t\hBt\:mt:q4=G\J\QI\ %f% %WVy#P^\ W6vz%:8>dn&:zF8踊N?]?DUߥfc=­DGY=z):.?x1IzE zI#b=z"F@1vN; Gع~D] gb+$N!LB\O")B^mC*ByD$ĮbNBT1w: bMbJP=6u&q(_"GbDKzG}vosBXl10xqwv#H#/s{&zzIgd|&"Jd!1AzHOQnz XKxzH _2ggu}\}Mh,zҺ@;B㊎#wځ* =ozhGJgVa%Q RQ. Ýډ; M ӗ6 @ѰXe`9PbYU)z~_E oٱ6A1!b9R"˲@Y8|})\#O^oYNؑph #%ד=%-,r#38'X8u&)qV^ MZ&JT.1.Fj3"%x@+jH@3Rc{TPYH(2R)Q^ 5LJ\QkiJ%qHq753L+MJDK_{پo;!qo#K/%nM5$%b>ԥ#s8(qgR"9%>߶]Iyi-nN"w55Țu.qsq>R?M$EnM$EnH[eȭ2PR֙uH1(r'>H['>"w%"-c8#)RSE¢zvF鎐8kb;{LHGXѳ""{7h*COVbxB+\IT 큨Y!c,6yš`1F5ɁE72Ƅǭ!aqg+{vaqG!ny#,n(y[5ǭqrqOq78u. q(x,{×f Q 9)ᑀG u@`q+L8,z^W}ɽ^wKE,.$aQ!Sf=Я__`qq*aq|{^btBroMՀEs!w#|Bڄݸ [3OoV}-qYr1u朓K*b<+Ⱥl`ƌv0<֟O( DV݁l{A7UpS_$hY:#fhY8J{9o 0KX#̗c?UܱH48⎆U`;7_n2KmѲhL Bc2&40p ⎁N8!ܑ1WAfa@.wp@fYg8H$qG84cΦ24,@cN Ef;z_ -6Q$Иi?K_ Sd, ^<eh; ʹ"ǮJaXġB !̎1{ +,]t5Z@ݺاߋDڸQB̲`7@ 1'OŁ:p avfR!c3i.C51HCaT璪bΨ#Ypf)ICǒkk1,%2F ?`r `GvfpaNI3t73 &;j| R.~a}F;%=9,`#yZmtKXǍ7;'mk`3 v̧ d݉a.`m:*4ŽHp5E;Q,;+Ǩ Xv l cXjvGjp?ia:202\0|K_d$6qxelRgaxL԰i^N L~5ZO &\EHEDaC1|klœ e̲T06?d$q l gN*5&gR$Nj 0\,Il@ϵT03WF ,m? 
Mp鬅K:ʳ'A߆ְTK KœjedyxL"{9"LLfG$&ӻ÷eIlMfy9DO[&3Q6I&qkVyLP(.)i vܗW 'ՙ$o9>OIIR6ḠЙ}ET=۳O%\5$ؾ?xPV䃫3BI"CJJg6M\D>$oh 5J=$~' ʅRP0]:sYJWJN}$Ƽ]%Ix!$)*Ox( Z%4ZwqQCXJpw#@\WI}g~zh 8aB+R2"˫~%qKhEńW+V~׈mߥ~r7(H^XHҊTC~,:6]5yz+TUT,$DuFv"ʕ`pY芫PG!!FR\CH[#b(.;ʊc.Ub͡?ѥR\ԁ#FFc~tc]*g-{XqO*WTJOɡc P\r3vӣg*d0;h }CmU8 䣝+}"DXIt&hJdT>avAw׾D lnڛ rCvwQ'±}TǣW?Om$oqio[埿-D[sV^J%{=U!>o?'?? ?y18w4??kk'?/8Ÿ?ϋ]gğ=sJ?Im>vCϱqǟCƫzZ3^>|u>8Z"J3/ׇd;lstƋt׻gu$:Ŀ3(Et1 Z_w~ޫ/Ϟ%w[f/M/׏Y7Lg-7S|? ˏ'eݽt7X=B+i{?ǫYw, 2&A7~|?[oy |Z>Ǟz8Ŀ3)^z:D7P~ߑwq}G}7}dG mRxxR|GR|GR|GR|GRS]˟⍵ vl?;|IaLH/Y9wש_݋mbZ&)axi<A){ۓ7ٵ |19?Yx}(}m~ z,絘 W .&gH }ك˸OidfufџAr]C '@ y!k{kblLorc\sk**~&8=ݫVᶁN\Sq}}k xyLOe6dLO !5z^<+kZ-k}`(QY{L)g غ<7]g_h]ujc],eu&:%{+u3Uors-OZ ֿ'c4JaML>a#/>7~F)l[7|hܞ&}, @'>!:GJC{)l 'o0hu >jS>Wftµwu b*l݋Q>9h]3[@&o>i J7|-#[/H Qxy"z[}Z?+fzݢzl868%=g?K}YNcC]G׼NCZϸoi}x\sڈ>O~^Ҿ| zzƾ/麌}ϑ"ZuyWz(O 9&gTlk|mhX<^5lC& j&RCM)|~ ab-%ȡBOy9v>qҐlzNE =UU@&1;Vz65i1w1̳=g}_~j]:~*޹~Tu-bې5UC+\ =ǡ~Z?x:s ֯ݛhMvGq?!=O=;؁z{JJo/hn7 =[>I<˘χmyL˞MЧp=5%8;#=Ygiր)5&gvm, =-f`]kNԐ =͙g\Ž9甴Fv9I q։h1gnp]屴ɓ̿~zCoP'GcWw2ʋz.>5-7vL!]<2bZb xؙc93ryYl1J(Hd)g * [[AW䊂ϭdPqb+(* .ejq.Kx0>GΟGC؃~: ZFEح̺$٩awyE/<43G Zڮ*11I8ݸz]ks=4/8Mqlkk܏a׸[~h[h߀_w6îOiI}vMv}">:Et[v|ˎnw ;(;QvN7~!GyhwI-o^_=IO!MKP^PP' 8Fv^NAS/6𮳉۾zLof@%Ќd^4-EN(70I$C *, <0-,,rVd8BаЍKD4ЊÊMC+%A \ib$m;c \{k%N H] Y䤈 -V|Cqd,r Ef== ܨd<+h\OU1P6Kך>}"!}85S#mv?J?SD౞Sč2Е;* ߞ/CןcŪ~酙y^ĉp.ĤO6镕芌R3N<O=?=ws^QT"sapa2BeŅll\q?S9P(q2BeāC\H䐲 K_ 09PDȑa~QRpV 9PxrH$|oqnQC8pde6Fȁ<ج|fXXm?NhyEAX8P&WtǼ7VҊ'c~Uj~BG(96BhU`Cx{Ǘۓ쮠S=?3{R`nN8:5E/bz^R|&ޗNE^h/"e}]Rm^QWI#C{VE{MQ+^FOvK.Co e6Mj<:O)5z =׿5ζv-p*걞raF}7wDD\oA]iWy*. v-v];Hݡ(%iw A$rӭgx}SGܷ<ҢE,ep8\ ~Š X,%KAMz՝ʀlҭ tGI$%ҭ@8+=}I07VU۪lU:p{xNU-鶉nCo!vюL8!Zu>.c7ދN)X0VχV -G&Z`ciuŰb=?9*HU-/\ىsLEx=V*,o6%)Ex2u0VyF3[LŴdجL0lr0Pa b6`1Df:i'dLy'Cd2G,L;#\ 1IA3_0t0,}`,fbuVď S+[0s0lJC psAS39&#s re.  f^82X"+_WGyǦJ] Ff &^8'߆```3b^ڼ3?Z-^hyΗa{D>zc|}1i08.VD%N  d$1V3L qފ|n2jʩFdT2o#"ԏH/JL=qTĐ`o è}I1؟x?P+PoP|E\0'e0'{D-1{D%A2Ȳ<񲨖mKv0P<=g.{(bOes+b/bf2"blJ>0Z*>MN*Mތɢ; M_ZBs.0IEk4?E{L*"K@@{vR /WVA ~{Ov:UϸMiBB!kT2>h/t[ Dp2њ:k{QhMWк>mL 2MBVYMq*{u?p%c,BWbuXB{ [>ӽu`[г>IKow*n,: P_(Vj@ j´߆"c2™r*\ JPyr8BEІ\UUUOFU"] W5h؞@:}Bc *ҕD Mc.*"*t+**]BrP`hP {Uh MWaj,*(-Waj ]2]DWa *o2b$n [t\b S aLb i~~^o݊(69cQkSI 0:L֊j63}=|r AS2\s`I.wba/e=pF\ʹ&{,"1nqCľ^5/H]iܪg/PT86Em/f>Ckǡ/ R%jfݪJj/ jzkN_3%bA7cLw^D.*RD}z_D.AP_D.A8mo}\E#hR2iR.Mpg7@%- B )O { (:J*:?BrO}{D] @(s96_ȕM({\ Na,tK<>O.\"$ @<t.7'\ oOh7!u1bͨ.\r1|\R1dԗ1Pjg= fYpbLv˥7ƔL^O]o{N>\RL"W*?.wˬӇփ/ǥKzׅKI ~"ȥtzayCuGIOJ ^ 7*?4 k_d +KBS_eߐ7o.KM,CutYޔeerIpYBpqY#tYˢd,Q1Deȅ2>7$72]I%tY*~oP_\?25.ZzeˢjrY],*,~eQ\54 cL \eoeQ=5cSg(StQ-Lq̆\x4]Ut4WՄ9?S"(S"((5eFy9R$[wYxg,rT,y`gJ9eڋlTG"eF;\T-~T3_V߀ 3>JҜN(JGh)IKYQ2o$}q%} Ƣ5 >JfNI+VQgH'9(3IeU#)QNaӆFEtJ.V$(VRnCwT:Ӆ#CCEW=8ڟ [H WNITcs8%Ćp蔨N"Ž*)ir 67p.7[5ه3_popr`6D6!!:'EΈiGsf\9#U9CrNYj%s!ɻp z^ 8?F\u~9PpFJ97kcT>v:#Q*1SϊY4 2jR8?Ǽsɯ9IU/ gItF|:#[&.oO`L9'840aMuv0~|:;(Mުϯp)|#/ʵg'cHG8;͟yΎwU>Mј72/i2Flݟ47uJm(83r)”El=}v,QL5QKZGz9S Fșp؜SW!dgȏV79T>Ky^tԋViXΔ@y`P;hY I!S6عExaճsyRW̺48OSt۫G7`?34 9 `*Giȷ9̇1xV3Qi*#˜ tONMs2coC|LҞchbMzK‰"QxKI}:$xKY-u wReJR&ã4Rouio \y}}8hޥ(G3|8xK*nit&Ly/[*2846 /RcV5=N۶=gr^,χѬ`VVC$iQ&/M^H^xeE%C^SSS).j1)5 Uh2?Z ^E9I!ݔZ/-r|{0zMu^[W|SP򚊼W@Uu55Fc]*EL땁JRC3 z0zMI^ŽåD-/L^Rl0r'0yI}ӻ/ BA^CKhe^/ !ueB/'aPxaփ0zacy>xzM x  免 I^ؘv,/l, 6免%/ ^Gz{;&3x3^XAIx]E'Bs$tx^=(8:\^(jY$/opԨ׫MUIE)]I^ސF/o< FH!DJukTUI^Rxu BSTA^ЁVNEMvC?Pͬ(([TT-;u, }>c)}Pa/f2Y5)n^,(`Nn^Tș^oT;.ih|zu&CE+W75L^h\zu&U|&MA2'yu>yu'NU&WW@Dʎ^]ҫũ[^\ezqjo^V@.Ꚛ55bЫC!.âWQ~0(yq]8+W'W-yuq(: W삼mSU(M=uGˋ[d?yq ^{qӆ-=-Q+KX]U K 10l ,_HϮ\m77j#UZUU{f,I(NDqRU#^@qMm7m%XG @8WY%{{TKr^\PK 'YO5B,I倰MUm]m\R܅{dkZlFe؁.*ͥk)#f6A,LǑ #Ou^ ۛnMa(Ǚaa4ّ 0ivګ2a=V+4rUvP񛅗"(j|E6)7W⪾ M͑2Wܴf 
|H-J6TIge!WI'ci.qfLJoUPHOrVuU^Y&Q%'z‡ߺ>wrp8u!⩲Ē ۹+'7. Cx笜[C딭߼īaX,ݫrhVfrzΆ  eaK[SI="P:ǃU4,5 Ǭ)OY.Tvƒ"s㱻n-9sg J^wGg`Ax R`GAv% \4CxrGo,D/}M|)UKuY _ΗS/}2$/}2"(ݝQNr,:i/H_<@$ex% |iO K$#hQvϻr+KܜϚ ͟G[{6H؞<';{^}oms/$b.6H)a9lQ,ޫ(7xLtÎ*;kSj80IyW* (풮5t+F%@)m FE.7gʰKG"*XVQl,B昘`*-hr FEjPNnpN[k$0r 嵙t=q /%A0r1lESX^`;R/V `QH@Ό&gKF 29>Q~#:Q2$y%*}$eLmGD]SD#zM \Q]fخ(qi4VDSYBD( A)r^~JdψJ5QP)enTE`I"JB?'Ѻr}(FVP\R"Q.Qhj"eM8 PR&jm|!( qQ`HwE jq'w#݄CCBPȄj˗,gWM}NkEd  s_F[!u(BQn%m1sQ.5ܮҞQYIZ>˰~=+g5遼Z%V(kI[xĬikc^,b3]eZ4B282VVOQ(]uҞswM`GC'jBͰBU-=UKyJ[*+Y콲8]e(KS^yeoYYV&$fiE51ԼCKe< u5+~JެJ>WF%BZ 5: ]"ϗ,PP{u`HZZ^u꣑B[Qtl({G:Q} ePAk'ix=B=B{Z| 2 uk-GuȔ=B@[Dٻ=l-P]'Q@,@,dԽ5:{ u6s#!W9eoUAeWFwm$Nԭni ukPFeS;(D5@VEV{zVOx5ig#~N//]94Z=ՄC ` NjҎ$/.羹&"V!q;89 m#R%ɱ gsԃ)w MMjc x t)fm )WvDL'4^)?/cyywq/aVV պV N.em DGpHNrU9B'RVMրD>bᖷ/RNyi8B|IupWbeODUɉh!妒3+=f"C]CO@r:o8$V'ap D)lw'arafg$ .BoJC߅b9\83}Z6* ~k1'0XJa&061ޓ06~JЄ=ix?0,!F}`"L57p}gLm)04d&OKZ_OwHw1~Xըܞ'cfU|-G13y'n=cbax9J%)+[j#bk2Gr $F4z_ɕԻ3a0.є ;H҅j0T5w}oo %fU.H̾5d80 Sno]\}7F'&p0Q50V%1~ػbB2|%so %댹VubJvblPz#Mͦ soItsososodsoUs;f$qbns?R'~>[z^kdZF榞cnd sw s/@}+1特>O=IǨ5>S[ܪ\ox0fn[b[`[܏ԕ}i-dd`nAs7{`&a(`R4U9=D]/iք5vXcTE/!|Wl@p'O7!$Bp% } ^bH؅ÈEl xU@^Chګ;̦7s9YIKu_ZjSuPN4+tt.}Za9{0S\]U w`b@hƙBWU3W701wS)< j-9%vEpuK% :@ԡ?!bȱ>!;m$VoKy*ܒ>97V} +`gpӺ]vLPcK}B%rCiL4c}@jn poYR6T=³Ul_Eb>>;{6+r &?j#I< ƽ, /p`Wʷ /Zgp/0^srpO0^| {q=6 ƟT6 0eep0Q̓2s{ʹ_*upO0SOO,$0L>yGdsMb8LlA&6<@͖ۆ+cCI11Ye `JÏNgsB}R-$^gy3:w}z1Ob|}\5G&w19H;facK ?mci3\޹͗Rtx'Y8ʐ޹fjSNpߩP~3X,N:pIϲ{Ua^k EK$HNz~> an* ?$*XʀC=qF҄Mi4I#Z$5I+wrC"9&Mi`VIS㿦@M3P fLJmtzWAVϽ{iI uXIj,i5I M~'5ƽX_* uB'%$5AHw ᯓX"1HjԠ&!e"51"5Hjlu޽["5wIjll!Q[cHZDR"=HRw^$Nb $CSCY}ΥH G$-(iq$!FBAH &iK").B]$)$HR"$)ﮂS?&Iq4G28v$-IYG@š";&nA\,öCWA54~X/bɚ0=I 3#i!G2S7Ŕ.H{C*^l"_i_s0.I :x$7a{oHaHW HZ0糍FaQ\1^Q~ K|z0UV ᨪáR^w4Jd!3gV@i@ckY!!QbmTdbMD0Y;(JaԲ3(rQ)b8-T G#ͫvĶ;hQl;wl(1>[C ͸=q' <%DJám- XRN,=[?hN \w7Nu'r?{wvoTK v(+E?;(pXn qK:x}øNut<JN)(,L :ռ[cVp);: xUM^;ι݃ %q{͞gf8-sJ:7㺣Eحf}h͜P!v;//XsJx'a}wי5i9E SXnenS3sXX+.攷ҍr׹Db4NT\LTG*q1`\L\Ѐs^3SAS笠ybb .F[\L.h1S4 A.h=/q1G-buس<+.%p10+}\S4:]sP%.󑋹.s1m.뜬k r1Gɽ5D{zԽD.Fe˷fS+\Q "jRqzr5 !Wbwq5AnFf0\q3+1nK.sQ>q3V{;,&U,R5]%5jP5MՈ U_?M8:jX-Ɵ4SLF*$"U#6T WUCpӥ\]6xD9X~~d'NKׄ1Tbg,I=r%15">OX.X. ؔ2>gϐ,œgy+~$_wD3ݵvMSL,m< qL3dL $c&|i*߄CYp(w;h Wm՘*F1q &9ED2FfvNdZ6?hHƨ.z:_7pR'd*dLpB%%bdLQVH 1T5ɘ' Jf1Y]*$D7ӲDYui$dzᶝ"b N@)1"}]@Th`Ȅ@cJ@T*aN"+@Aq]p8 n&=!73|ba >Sq$A}5dil<6V#+3Tbml429!և(h:CFebe;#k2}ЃX2,N&Q`B/!+*JQh=8t>g}X-KjD8|K}CO,N,k\tPbq =KNy;ؠ Ö֏, ϓ"켝Bv+/BQGbX1,Q0-DRKDb&e6vqY2iKԸx)h!1T2!8Yʯ"KĶOD"-oեgхI-tgnS]uz5 nU6m>%ziM"FBHҨY4:c\J:Hi~%IƻAHSI鱴gcܫP@4A{4%H#oi$O{KT)i`˒9U$x DMP`^ ~`L❽ xF]p ]-Z: }3}wӥ otGokvloBUBUV3u! d_&m:Y/:؜+?w .ykw>fuXWU`TTg ,b"!_Á1 q8%F, fKKUZzQC]Tt#$v ,Q43Ov­k<MNke%gk̎g;P)\l6n)D-= vi*,tond ʞllCQ''&26G?(FU(QrF{0,(F=JtgJ<(CgEtsT_@8EJc}Z*E~&]4Bɠd-ڴ4QWea睎+G*)ς"} [D5K;$fA'G#TM֋~\Kt[[w4Д0nDQK,]繾鏈pQeg<2^G  !>.GF|\v7.Ոm w5Pčgz^bJX'&bTN8}D3Je8N|K8m50f Q540͚lA5Y!уWH~JP5274Ʊ.W*%LI^m}T3^5a2(΄WD|*\cbP;l;k1k튕*ȝk \hv]-QINLrJM?'L*…#'̬:^Ep}*] #Y_|Y0gz Ȟ`dOb PY;TDTGOnDb9@,@AT 'ˠ P*4) -q@p$E; d ]+HUU]/- @>bBTH0*  3J"ܯ^_1C*ApHV^9s}#PO![֔p B($ \D1C2 v|vʴ 0]|hꮝ K _ȉ9`1]b:G2Qb8`PMҲ̎ HggOApcQ0Dwi*R eVϖ2oU ?Vu =ɭu? &uY8~L~Ta*ۖ]u[c{] v(GQ*'½ICzG :C垪֡HH83ǵꔅ1OHH8:LԺ alTQ&UG纾Xs.3vr? ?W랛7i e9 M[s"M(ׯ)ʫGd&?J?WtǤA I?lzgע)-Jз4) 91`I*5зWozWiTwbt5w%fLs5 ԫrW{k*ju1oMu}2ʝ gq:3Yvb{|8@ZhMƵp0!XvqLjp˪g]4c,nY]r܅fz.솇0ӻ$)?uKsz`?\KrV0kJ,Xt4E$J ?0kDccJ=ۇ5A=5d3,mY8io{vڛǬT!jev%!f<!>Ge֎ 6fa!`?l}3 LJi.ǔS=d9-e`L7~Ä&W }6Ic 00ڣ&J n>x?Z'/J6 dh}Ywk';A:vK֩G~NllhTM'Y?Z'ތN!ڐ͆|P&26dVZ-ähLu1ad4!"7Y lQT`,~ #?Q Sx5:a4 +4dZ*h! 
brms/vignettes/brms_nonlinear.Rmd0000644000175000017500000003071414105230573017040 0ustar nileshnilesh
---
title: "Estimating Non-Linear Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Non-Linear Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

This vignette provides an introduction on how to fit non-linear multilevel models with **brms**. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models. Ignoring group-level effects for the moment, the predictor term $\eta_n$ of a generalized linear model for observation $n$ can be written as follows:

$$\eta_n = \sum_{i = 1}^K b_i x_{ni}$$

where $b_i$ is the regression coefficient of predictor $i$ and $x_{ni}$ is the data of predictor $i$ for observation $n$.
This also comprises interaction terms and various other data transformations. However, the structure of $\eta_n$ is always linear in the sense that the regression coefficients $b_i$ are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term

$$\eta_n = b_1 \exp(b_2 x_n)$$

would *not* be a *linear* predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call *non-linear* models. Note that the term 'non-linear' does not say anything about the assumed distribution of the response variable. In particular, it does not mean 'not normally distributed', as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in **brms** see `vignette("brms_families")`).

## A Simple Non-Linear Model

We begin with a simple example using simulated data.

```{r}
b <- c(2, 0.75)
x <- rnorm(100)
y <- rnorm(100, mean = b[1] * exp(b[2] * x))
dat1 <- data.frame(x, y)
```

As stated above, we cannot use a generalized linear model to estimate $b$, so we go ahead and specify a non-linear model.

```{r, results='hide'}
prior1 <- prior(normal(1, 2), nlpar = "b1") +
  prior(normal(0, 2), nlpar = "b2")
fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE),
            data = dat1, prior = prior1)
```

When looking at the above code, the first thing that becomes obvious is that we changed the `formula` syntax to display the non-linear formula, including predictors (i.e., `x`) and parameters (i.e., `b1` and `b2`), wrapped in a call to `bf`. This stands in contrast to classical **R** formulas, where only predictors are given and parameters are implicit. The argument `b1 + b2 ~ 1` serves two purposes. First, it indicates which variables in `formula` are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict `b1` and `b2`, so we just fit intercepts that represent our estimates of $b_1$ and $b_2$ in the model equation above. The formula `b1 + b2 ~ 1` is a short form of `b1 ~ 1, b2 ~ 1` that can be used if multiple non-linear parameters share the same formula. Setting `nl = TRUE` tells **brms** that the formula should be treated as non-linear.

In contrast to generalized linear models, priors on population-level parameters (i.e., 'fixed effects') are often mandatory to identify a non-linear model. Thus, **brms** requires the user to specify these priors explicitly. In the present example, we used a `normal(1, 2)` prior on (the population-level intercept of) `b1`, and a `normal(0, 2)` prior on (the population-level intercept of) `b2`. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think about appropriate priors. Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem, and one solution is to set stronger (i.e., narrower) priors.
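Before fitting, it can also help to ask **brms** which parameters of the non-linear model expect priors in the first place. The following chunk is not part of the original analysis; it is a minimal sketch, assuming the simulated data set `dat1` and the non-linear formula just introduced, that uses `get_prior()` to list the parameter classes for which priors can (and, for the population-level effects of `b1` and `b2`, should) be specified.

```{r}
# A minimal sketch (not part of the original analysis): list the parameters
# of the non-linear model for which priors can be specified, using the
# simulated data set 'dat1' from above.
get_prior(
  bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE),
  data = dat1
)
```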
To obtain summaries of the fitted model, we apply

```{r}
summary(fit1)
plot(fit1)
plot(conditional_effects(fit1), points = TRUE)
```

The `summary` method reveals that we were able to recover the true parameter values quite well. According to the `plot` method, our MCMC chains have converged well and to the same posterior. The `conditional_effects` method visualizes the model-implied (non-linear) regression line.

We might also be interested in comparing our non-linear model to a classical linear model.

```{r, results='hide'}
fit2 <- brm(y ~ x, data = dat1)
```

```{r}
summary(fit2)
```

To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the **bayesplot** package in the backend.

```{r}
pp_check(fit1)
pp_check(fit2)
```

We can also easily compare model fit using leave-one-out cross-validation.

```{r}
loo(fit1, fit2)
```

Since smaller `LOOIC` values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model.

## A Real-World Non-Linear Model

On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originating from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows:

$$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$
$$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$

The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of each accident year. It constitutes a non-linear parameter in our framework, along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with **brms**.

```{r}
data(loss)
head(loss)
```

We translate the proposed model into a non-linear **brms** model as follows.

```{r, results='hide'}
fit_loss <- brm(
  bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)),
     ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1,
     nl = TRUE),
  data = loss, family = gaussian(),
  prior = c(
    prior(normal(5000, 1000), nlpar = "ult"),
    prior(normal(1, 2), nlpar = "omega"),
    prior(normal(45, 10), nlpar = "theta")
  ),
  control = list(adapt_delta = 0.9)
)
```

We estimate a group-level effect of accident year (variable `AY`) for the ultimate loss `ult`. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which, in the case of `ult`, contains only a varying intercept over accident years. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well-known methods.

```{r}
summary(fit_loss)
plot(fit_loss, N = 3, ask = FALSE)
conditional_effects(fit_loss)
```

Next, we show the conditional effects separately for each accident year.

```{r}
conditions <- data.frame(AY = unique(loss$AY))
rownames(conditions) <- unique(loss$AY)
me_loss <- conditional_effects(
  fit_loss, conditions = conditions,
  re_formula = NULL, method = "predict"
)
plot(me_loss, ncol = 5, points = TRUE)
```

It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years.
Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020).

## Advanced Item-Response Models

As a third example, we want to show how to fit more advanced item-response models using the non-linear model framework of **brms**. For simplicity, suppose we have a single forced-choice item with three alternatives, of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Persons are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation.

```{r}
inv_logit <- function(x) 1 / (1 + exp(-x))
ability <- rnorm(300)
p <- 0.33 + 0.67 * inv_logit(ability)
answer <- ifelse(runif(300, 0, 1) < p, 1, 0)
dat_ir <- data.frame(ability, answer)
```

The most basic item-response model is equivalent to a simple logistic regression model.

```{r, results='hide'}
fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli())
```

However, this model completely ignores the guessing probability and will thus likely come to biased estimates and predictions.

```{r}
summary(fit_ir1)
plot(conditional_effects(fit_ir1), points = TRUE)
```

A more sophisticated approach incorporating the guessing probability looks as follows:

```{r, results='hide'}
fit_ir2 <- brm(
  bf(answer ~ 0.33 + 0.67 * inv_logit(eta),
     eta ~ ability, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"),
  prior = prior(normal(0, 5), nlpar = "eta")
)
```

It is very important to set the link function of the `bernoulli` family to `identity`, or else we will apply two link functions. This is because our non-linear predictor term already contains the desired link function (`0.33 + 0.67 * inv_logit`), but the `bernoulli` family applies the default `logit` link on top of it. This will of course lead to strange and uninterpretable results. Thus, please make sure that you set the link function to `identity` whenever your non-linear predictor term already contains the desired link function.

```{r}
summary(fit_ir2)
plot(conditional_effects(fit_ir2), points = TRUE)
```

Comparing model fit via leave-one-out cross-validation

```{r}
loo(fit_ir1, fit_ir2)
```

shows that both models fit the data equally well, but remember that predictions of the first model might still be misleading, as they may well be below the guessing probability for low ability values. Now, suppose that we don't know the guessing probability and want to estimate it from the data. This can easily be done by changing the previous model just a bit.

```{r, results='hide'}
fit_ir3 <- brm(
  bf(answer ~ guess + (1 - guess) * inv_logit(eta),
     eta ~ 0 + ability, guess ~ 1, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"),
  prior = c(
    prior(normal(0, 5), nlpar = "eta"),
    prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1)
  )
)
```

Here, we model the guessing probability as a non-linear parameter, making sure that it cannot fall outside the interval $[0, 1]$. We did not estimate an intercept for `eta`, as this would lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models).

```{r}
summary(fit_ir3)
plot(fit_ir3)
plot(conditional_effects(fit_ir3), points = TRUE)
```

The results show that we are able to recover the simulated model parameters with this non-linear model.
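As an additional check, not part of the original vignette, we can compare the estimated guessing probability directly with the value of 0.33 used in the simulation above. The following sketch assumes the fitted model `fit_ir3`: `fixef()` extracts the population-level estimates (including the intercept of the non-linear parameter `guess`), and `hypothesis()` summarizes the posterior of the difference between this intercept and 0.33.

```{r}
# A small sketch (not part of the original vignette); assumes 'fit_ir3' above.
# Population-level estimates, including the intercept of 'guess':
fixef(fit_ir3)
# Posterior summary of 'b_guess_Intercept - 0.33':
hypothesis(fit_ir3, "guess_Intercept = 0.33")
```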
Of course, real item-response data have multiple items, so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of **brms**, and I hope that this vignette serves as a good starting point.

## References

Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. *CAS Research Papers*.

brms/build/0000755000175000017500000000000014146747050012456 5ustar nileshnilesh
brms/build/vignette.rds0000644000175000017500000000110514146747050015012 0ustar nileshnilesh
brms/tests/0000755000175000017500000000000014146747057012530 5ustar nileshnilesh
brms/tests/testthat/0000755000175000017500000000000014146772152014362 5ustar nileshnilesh
brms/tests/testthat/tests.data-helpers.R0000644000175000017500000000215713737533714020230 0ustar nileshnilesh
context("Tests for data helper functions")

test_that("validate_newdata handles factors correctly", {
  fit <- brms:::rename_pars(brms:::brmsfit_example1)
  fit$data$fac <- factor(sample(1:3, nrow(fit$data), TRUE))
  newdata <- fit$data[1:5, ]
  expect_silent(brms:::validate_newdata(newdata, fit))
  newdata$visit <- 1:5
  expect_error(brms:::validate_newdata(newdata, fit),
               "Levels '5' of grouping factor 'visit' cannot")
  newdata$fac <- 1:5
  expect_error(brms:::validate_newdata(newdata, fit),
               "New factor levels are not allowed")
})

test_that("validate_data returns correct model.frames", {
  dat <- data.frame(y = 1:5, x = 1:5, z = 6:10, g = 5:1)
  bterms <- brmsterms(y ~ as.numeric(x) + (as.factor(z) | g),
                      family = gaussian())
  mf <- brms:::validate_data(dat, bterms = bterms)
  expect_true(all(c("x", "z") %in% names(mf)))
  bterms <- brmsterms(y ~ 1 + (1|g/x/z), family = gaussian())
  mf <- brms:::validate_data(dat, bterms = bterms)
  expect_equal(mf[["g:x"]], paste0(dat$g, "_", dat$x))
  expect_equal(mf[["g:x:z"]], paste0(dat$g, "_", dat$x, "_", dat$z))
})

brms/tests/testthat/tests.restructure.R0000644000175000017500000001630114105230573020225 0ustar nileshnilesh
context("Tests for restructuring of old brmsfit objects")

test_that("restructure can be run without error", {
  # This test does not check if old models can really be restructured
  # since restructure is called with an already up-to-date model.
fit2 <- brms:::rename_pars(brms:::brmsfit_example2) fit2$version <- NULL fit2$exclude <- c("L_1", "zs_1") expect_warning( fit2_up <- restructure(fit2), "Models fitted with brms < 1.0 are no longer offically supported" ) expect_is(fit2_up, "brmsfit") }) test_that("restructure_formula_v1 works correctly", { form <- structure( y ~ x + z, sigma = sigma ~ x, class = c("brmsformula", "formula") ) form <- brms:::restructure_formula_v1(form) expect_equal(form$formula, y ~ x + z) expect_equal(form$pforms, list(sigma = sigma ~ x)) expect_true(!attr(form$formula, "nl")) form <- structure( y ~ a * exp(-b * x), nonlinear = list(a = a ~ x, b = b ~ 1), class = c("brmsformula", "formula") ) form <- brms:::restructure_formula_v1(form) expect_equal(form$formula, y ~ a * exp(-b * x)) expect_equal(form$pforms, list(a = a ~ x, b = b ~ 1)) expect_true(attr(form$formula, "nl")) }) test_that("change_prior returns expected lists", { pars <- c("b", "b_1", "bp", "bp_1", "prior_b", "prior_b_1", "prior_b_3", "sd_x[1]", "prior_bp_1") expect_equivalent( brms:::change_prior( class = "b", pars = pars, names = c("x1", "x3", "x2") ), list(list(pos = 6, fnames = "prior_b_x1"), list(pos = 7, fnames = "prior_b_x2")) ) expect_equivalent( brms:::change_prior( class = "bp", pars = pars, names = c("x1", "x2"), new_class = "b" ), list(list(pos = 9, fnames = "prior_b_x1"))) }) test_that("change_old_re and change_old_re2 return expected lists", { data <- data.frame(y = rnorm(10), x = rnorm(10), g = 1:10) bterms <- brmsterms(bf(y ~ a, a ~ x + (1+x|g), family = gaussian(), nl = TRUE)) ranef <- brms:::tidy_ranef(bterms, data = data) target <- list( list(pos = c(rep(FALSE, 2), TRUE, rep(FALSE, 22)), oldname = "sd_a_g_Intercept", pnames = "sd_g_a_Intercept", fnames = "sd_g_a_Intercept", dims = numeric(0)), list(pos = c(rep(FALSE, 3), TRUE, rep(FALSE, 21)), oldname = "sd_a_g_x", pnames = "sd_g_a_x", fnames = "sd_g_a_x", dims = numeric(0)), list(pos = c(rep(FALSE, 4), TRUE, rep(FALSE, 20)), oldname = "cor_a_g_Intercept_x", pnames = "cor_g_a_Intercept_a_x", fnames = "cor_g_a_Intercept_a_x", dims = numeric(0)), list(pos = c(rep(FALSE, 5), rep(TRUE, 20)), oldname = "r_a_g", pnames = "r_g_a", fnames = c(paste0("r_g_a[", 1:10, ",Intercept]"), paste0("r_g_a[", 1:10, ",x]")), dims = c(10, 2))) pars <- c("b_a_Intercept", "b_a_x", "sd_a_g_Intercept", "sd_a_g_x", "cor_a_g_Intercept_x", paste0("r_a_g[", 1:10, ",Intercept]"), paste0("r_a_g[", 1:10, ",x]")) dims <- list("sd_a_g_Intercept" = numeric(0), "sd_a_g_x" = numeric(0), "cor_a_g_Intercept_x" = numeric(0), "r_a_g" = c(10, 2)) expect_equivalent(brms:::change_old_re(ranef, pars = pars, dims = dims), target) target <- list( list(pos = c(rep(FALSE, 2), TRUE, rep(FALSE, 22)), oldname = "sd_g_a_Intercept", pnames = "sd_g__a_Intercept", fnames = "sd_g__a_Intercept", dims = numeric(0)), list(pos = c(rep(FALSE, 3), TRUE, rep(FALSE, 21)), oldname = "sd_g_a_x", pnames = "sd_g__a_x", fnames = "sd_g__a_x", dims = numeric(0)), list(pos = c(rep(FALSE, 4), TRUE, rep(FALSE, 20)), oldname = "cor_g_a_Intercept_a_x", pnames = "cor_g__a_Intercept__a_x", fnames = "cor_g__a_Intercept__a_x", dims = numeric(0)), list(pos = c(rep(FALSE, 5), rep(TRUE, 20)), oldname = "r_g_a", pnames = "r_g__a", fnames = c(paste0("r_g__a[", 1:10, ",Intercept]"), paste0("r_g__a[", 1:10, ",x]")), dims = c(10, 2))) pars <- c("b_a_Intercept", "b_a_x", "sd_g_a_Intercept", "sd_g_a_x", "cor_g_a_Intercept_a_x", paste0("r_g_a[", 1:10, ",Intercept]"), paste0("r_g_a[", 1:10, ",x]")) dims <- list("sd_g_a_Intercept" = numeric(0), "sd_g_a_x" = numeric(0), 
"cor_g_a_Intercept_a_x" = numeric(0), "r_g_a" = c(10, 2)) expect_equivalent(brms:::change_old_re2(ranef, pars = pars, dims = dims), target) }) test_that("change_old_sm return expected lists", { target <- list( list(pos = c(FALSE, TRUE, rep(FALSE, 15)), oldname = "sds_sx1kEQ9", pnames = "sds_sx1_1", fnames = "sds_sx1_1", dims = numeric(0)), list(pos = c(rep(FALSE, 8), rep(TRUE, 9)), oldname = "s_sx1kEQ9", pnames = "s_sx1_1", fnames = paste0("s_sx1_1[", 1:9, "]"), dims = 9), list(pos = c(TRUE, rep(FALSE, 16)), oldname = "sds_sigma_t2x0", pnames = "sds_sigma_t2x0_1", fnames = "sds_sigma_t2x0_1", dims = numeric(0)), list(pos = c(FALSE, FALSE, rep(TRUE, 6), rep(FALSE, 9)), oldname = "s_sigma_t2x0", pnames = "s_sigma_t2x0_1", fnames = paste0("s_sigma_t2x0_1[", 1:6, "]"), dims = 6) ) pars <- c("sds_sigma_t2x0", "sds_sx1kEQ9", paste0("s_sigma_t2x0[", 1:6, "]"), paste0("s_sx1kEQ9[", 1:9, "]")) dims <- list(sds_sigma_t2x0 = numeric(0), sds_sx1kEQ9 = numeric(0), s_sigma_t2x0 = 6, s_sx1kEQ9 = 9) bterms <- brmsterms(bf(y ~ s(x1, k = 9), sigma ~ t2(x0)), family = gaussian()) dat <- data.frame(y = rnorm(100), x1 = rnorm(100), x0 = rnorm(100)) expect_equivalent(brms:::change_old_sm(bterms, dat, pars, dims), target) }) test_that("change_old_mo returns expected lists", { bterms <- brmsterms(bf(y ~ mo(x), sigma ~ mo(x)), family = gaussian()) data <- data.frame(y = rnorm(10), x = rep(1:5, 2)) pars <- c( "bmo_x", "bmo_sigma_x", paste0("simplex_x[", 1:5, "]"), paste0("simplex_sigma_x[", 1:5, "]") ) target <- list( list( pos = c(TRUE, rep(FALSE, 11)), fnames = "bmo_mox" ), list( pos = c(FALSE, FALSE, rep(TRUE, 5), rep(FALSE, 5)), fnames = paste0("simo_mox1[", 1:5, "]") ), list( pos = c(FALSE, TRUE, rep(FALSE, 10)), fnames = "bmo_sigma_mox" ), list( pos = c(rep(FALSE, 7), rep(TRUE, 5)), fnames = paste0("simo_sigma_mox1[", 1:5, "]") ) ) expect_equivalent(brms:::change_old_mo(bterms, data, pars), target) }) test_that("change_old_categorical works correctly", { dat <- data.frame( y = rep(c("cat1", "cat2", "cat3"), 3), x = rnorm(9) ) fam <- categorical() fam$dpars <- c("mucat2", "mucat3") bterms <- brmsterms(bf(y ~ x) + fam) pars <- c("b_cat2_Intercept", "b_cat3_Intercept", "b_cat2_x", "b_cat3_x") res <- brms:::change_old_categorical(bterms, dat, pars) target <- list( list( pos = rep(TRUE, 4), fnames = c( "b_mucat2_Intercept", "b_mucat3_Intercept", "b_mucat2_x", "b_mucat3_x" ) ) ) expect_equivalent(res, target) }) brms/tests/testthat/tests.exclude_pars.R0000644000175000017500000000327314105230573020320 0ustar nileshnileshcontext("Tests for exclude_pars helper functions") test_that("exclude_pars returns expected parameter names", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:5, 2), h = factor(rep(1:5, each = 2))) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true(all(c("r_1", "r_2") %in% ep)) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(all = TRUE)) ep <- brms:::exclude_pars(fit) expect_true(!any(c("z_1", "z_2") %in% ep)) fit <- brm(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(group = FALSE)) ep <- brms:::exclude_pars(fit) expect_true("r_1_1" %in% ep) fit <- brm(y ~ x1*x2 + (x1 | g) + (1 | h), dat, empty = TRUE, save_pars = save_pars(group = "h")) ep <- brms:::exclude_pars(fit) expect_true(!"r_1_3" %in% ep) fit <- brm(y ~ s(x1) + x2, dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("zs_1_1" %in% ep) fit <- brm(bf(y ~ eta, eta ~ x1 + s(x2), nl = TRUE), dat, 
empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("zs_eta_1_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE) ep <- brms:::exclude_pars(fit) expect_true("Xme_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE, save_pars = save_pars(latent = "x1")) ep <- brms:::exclude_pars(fit) expect_true(!"Xme_1" %in% ep) fit <- brm(y ~ me(x1, g), dat, empty = TRUE, save_pars = save_pars(manual = "Lme_1")) ep <- brms:::exclude_pars(fit) expect_true(!"Lme_1" %in% ep) }) brms/tests/testthat/tests.rename_pars.R0000644000175000017500000000070613737534526020153 0ustar nileshnileshcontext("Tests for renaming helper functions") test_that("make_index_names returns correct 1 and 2 dimensional indices", { expect_equal(make_index_names(rownames = 1:2), c("[1]", "[2]")) expect_equal(make_index_names(rownames = 1:2, colnames = 1:3, dim = 1), c("[1]", "[2]")) expect_equal(make_index_names(rownames = c("a","b"), colnames = 1:3, dim = 2), c("[a,1]", "[b,1]", "[a,2]", "[b,2]", "[a,3]", "[b,3]")) }) brms/tests/testthat/helpers/0000755000175000017500000000000014111751667016024 5ustar nileshnileshbrms/tests/testthat/helpers/link_categorical_ch.R0000644000175000017500000000135314111751667022115 0ustar nileshnilesh# Very similar to link_categorical(), but iterates over the observations: link_categorical_ch <- function(x, refcat = 1, return_refcat = TRUE) { # For testing purposes, only allow 3-dimensional arrays here: stopifnot(length(dim(x)) == 3) x_tosweep <- if (return_refcat) { x } else { slice(x, 3, -refcat, drop = FALSE) } ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] log(aperm( array( sapply(seq_len(nobsv), function(i) { slice(x_tosweep, 2, i) / slice(slice(x, 2, i), 2, refcat) }, simplify = "array"), dim = c(ndraws, ncat - !return_refcat, nobsv) ), perm = c(1, 3, 2) )) } environment(link_categorical_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/simopts_catlike.R0000644000175000017500000000012214111751667021334 0ustar nileshnileshset.seed(1234) ndraws_vec <- c(1, 5) nobsv_vec <- c(1, 4) ncat_vec <- c(2, 3) brms/tests/testthat/helpers/insert_refcat_ch.R0000644000175000017500000000267514111751667021463 0ustar nileshnilesh# Very similar to insert_refcat(), but iterates over the observations (if # necessary): insert_refcat_ch <- function(eta, family) { ndim <- length(dim(eta)) if (ndim == 2) { return(insert_refcat_ch_i(eta, family = family)) } else if (ndim == 3) { out <- abind::abind(lapply(seq_cols(eta), function(i) { insert_refcat_ch_i(slice_col(eta, i), family = family) }), along = 3) return(aperm(out, perm = c(1, 3, 2))) } else { stop2("eta has wrong dimensions.") } } environment(insert_refcat_ch) <- as.environment(asNamespace("brms")) # A matrix-only variant of insert_refcat() (used to be insert_refcat() before it # was extended to arrays): insert_refcat_ch_i <- function(eta, family) { stopifnot(is.matrix(eta), is.brmsfamily(family)) if (!conv_cats_dpars(family) || isNA(family$refcat)) { return(eta) } # need to add zeros for the reference category zeros <- as.matrix(rep(0, nrow(eta))) if (is.null(family$refcat) || is.null(family$cats)) { # no information on the categories provided: # use the first category as the reference return(cbind(zeros, eta)) } colnames(zeros) <- paste0("mu", family$refcat) iref <- match(family$refcat, family$cats) before <- seq_len(iref - 1) after <- setdiff(seq_cols(eta), before) cbind(eta[, before, drop = FALSE], zeros, eta[, after, drop = FALSE]) } environment(insert_refcat_ch_i) <- as.environment(asNamespace("brms")) 
brms/tests/testthat/helpers/link_ordinal_ch.R0000644000175000017500000000477514111751667021303 0ustar nileshnileshlink_ch <- function(x, link) { # switch() would be more straightforward, but for testing purposes, use if () # here: if (link == "logit") { return(qlogis(x)) } else if (link == "probit") { return(qnorm(x)) } else if (link == "cauchit") { return(qcauchy(x)) } else if (link == "cloglog") { return(log(-log(1 - x))) } else { stop("Unknown link.") } } # Very similar to link_cumulative(), but iterates over the observations: link_cumulative_ch <- function(x, link) { # For testing purposes, only allow 3-dimensional arrays here: stopifnot(length(dim(x)) == 3) ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] x_cumsum <- aperm( array( sapply(seq_len(nobsv), function(i) { apply(x[, i, -ncat, drop = FALSE], 1, cumsum) }, simplify = "array"), dim = c(ncat - 1, ndraws, nobsv) ), perm = c(2, 3, 1) ) link_ch(x_cumsum, link = link) } # The same as link_sratio(), but dropping margins: link_sratio_ch <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, S_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- slice(x, ndim, k) / prev_res$S_km1_prod return(list(F_k = abind::abind(prev_res$F_k, F_k, along = ndim), S_km1_prod = prev_res$S_km1_prod * (1 - F_k))) } x <- .F_k(dim(x)[ndim] - 1)$F_k link_ch(x, link) } environment(link_sratio_ch) <- as.environment(asNamespace("brms")) # The same as link_cratio(), but dropping margins: link_cratio_ch <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, F_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- 1 - slice(x, ndim, k) / prev_res$F_km1_prod return(list(F_k = abind::abind(prev_res$F_k, F_k, along = ndim), F_km1_prod = prev_res$F_km1_prod * F_k)) } x <- .F_k(dim(x)[ndim] - 1)$F_k link_ch(x, link) } environment(link_cratio_ch) <- as.environment(asNamespace("brms")) # The same as link_acat(), but possibly dropping margins and not treating the # logit link as a special case: link_acat_ch <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] dim_noncat <- dim(x)[-ndim] x <- slice(x, ndim, -1) / slice(x, ndim, -ncat) x <- inv_odds(x) array(link_ch(x, link), dim = c(dim_noncat, ncat - 1)) } environment(link_acat_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/inv_link_categorical_ch.R0000644000175000017500000000230714111751667022771 0ustar nileshnilesh# Very similar to inv_link_categorical(), but iterates over the observations: inv_link_categorical_ch <- function(x, log = FALSE) { ndim <- length(dim(x)) # For testing purposes, only allow 3-dimensional arrays here: if (ndim <= 1) { x <- array(x, dim = c(1, 1, length(x))) ndim <- length(dim(x)) need_drop <- TRUE } else if (ndim == 2) { x <- array(x, dim = c(dim(x)[1], 1, dim(x)[2])) ndim <- length(dim(x)) need_drop <- TRUE } else if (ndim > 3) { stop("At most 3 dimensions are allowed here.") } else { need_drop <- FALSE } ndraws <- dim(x)[1] nobsv <- dim(x)[2] ncat <- dim(x)[3] .softmax <- if (log) { log_softmax } else { softmax } out <- aperm( array( sapply(seq_len(nobsv), function(i) { .softmax(slice(x, 2, i)) }, simplify = "array"), dim = c(ndraws, ncat, nobsv) ), perm = c(1, 3, 2) ) # Quick-and-dirty solution to drop the margin for a single observation (but # only if the input object was not a 3-dimensional array): if (need_drop) { return(slice(out, 2, 1)) } out } environment(inv_link_categorical_ch) <- as.environment(asNamespace("brms")) 
brms/tests/testthat/helpers/inv_link_ordinal_ch.R0000644000175000017500000000572114111751667022147 0ustar nileshnileshinv_link_cumulative_ch <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 out <- vector("list", ncat) out[[1]] <- slice(x, ndim, 1) if (ncat > 2) { .diff <- function(k) { slice(x, ndim, k) - slice(x, ndim, k - 1) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .diff) } out[[ncat]] <- 1 - slice(x, ndim, ncat - 1) abind::abind(out, along = ndim) } environment(inv_link_cumulative_ch) <- as.environment(asNamespace("brms")) inv_link_sratio_ch <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) out[[1]] <- slice(x, ndim, 1) if (ncat > 2) { .condprod <- function(k) { slice(x, ndim, k) * apply(1 - slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .condprod) } out[[ncat]] <- apply(1 - x, marg_noncat, prod) abind::abind(out, along = ndim) } environment(inv_link_sratio_ch) <- as.environment(asNamespace("brms")) inv_link_cratio_ch <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) out[[1]] <- 1 - slice(x, ndim, 1) if (ncat > 2) { .condprod <- function(k) { (1 - slice(x, ndim, k)) * apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .condprod) } out[[ncat]] <- apply(x, marg_noncat, prod) abind::abind(out, along = ndim) } environment(inv_link_cratio_ch) <- as.environment(asNamespace("brms")) inv_link_acat_ch <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] + 1 marg_noncat <- seq_along(dim(x))[-ndim] out <- vector("list", ncat) if (link == "logit") { # faster evaluation in this case out[[1]] <- array(1, dim = dim(x)[-ndim]) out[[2]] <- exp(slice(x, ndim, 1)) if (ncat > 2) { .catsum <- function(k) { exp(apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, sum)) } remaincats <- 3:ncat out[remaincats] <- lapply(remaincats, .catsum) } } else { x <- ilink(x, link) out[[1]] <- apply(1 - x, marg_noncat, prod) if (ncat > 2) { .othercatprod <- function(k) { apply(slice(x, ndim, 1:(k - 1), drop = FALSE), marg_noncat, prod) * apply(slice(1 - x, ndim, k:(ncat - 1), drop = FALSE), marg_noncat, prod) } mid_cats <- 2:(ncat - 1) out[mid_cats] <- lapply(mid_cats, .othercatprod) } out[[ncat]] <- apply(x, marg_noncat, prod) } out <- abind::abind(out, along = ndim) catsum <- apply(out, marg_noncat, sum) sweep(out, marg_noncat, catsum, "/") } environment(inv_link_acat_ch) <- as.environment(asNamespace("brms")) brms/tests/testthat/helpers/simopts_catlike_oneobs.R0000644000175000017500000000015614111751667022710 0ustar nileshnilesh# This test corresponds to a single observation. 
set.seed(1234) ndraws_vec <- c(1, 5) ncat_vec <- c(2, 3) brms/tests/testthat/tests.emmeans.R0000644000175000017500000000271614111751667017301 0ustar nileshnileshcontext("Tests for emmeans support") skip_on_cran() require(emmeans) SW <- suppressWarnings fit1 <- rename_pars(brms:::brmsfit_example1) fit2 <- rename_pars(brms:::brmsfit_example2) fit4 <- rename_pars(brms:::brmsfit_example4) fit6 <- rename_pars(brms:::brmsfit_example6) test_that("emmeans returns expected output structure", { em <- summary(emmeans(fit1, "Age", by = "Trt")) expect_equal(nrow(em), 2) em <- summary(emmeans(fit1, "Trt", dpar = "sigma")) expect_equal(nrow(em), 2) em <- summary(emmeans(fit1, "Age", by = "Exp")) expect_equal(nrow(em), 5) em <- summary(emmeans(fit1, "Exp")) expect_equal(nrow(em), 5) em <- SW(summary(emmeans(fit2, "Age", nlpar = "a"))) expect_equal(nrow(em), 1) em <- SW(summary(emmeans(fit4, "x1", dpar = "mu"))) expect_equal(nrow(em), 1) }) test_that("emmeans supports 'epred' predictions", { em <- summary(emmeans(fit2, "Age", epred = TRUE)) expect_equal(nrow(em), 1) em <- summary(emmeans(fit2, "Age", by = "Trt", epred = TRUE)) expect_equal(nrow(em), 2) # test for a multivariate model em <- summary(emmeans(fit6, "Age", by = "Trt", epred = TRUE)) expect_equal(nrow(em), 2) }) test_that("emmeans supports multilevel terms", { em <- summary(emmeans(fit1, "Age", by = "Trt", re_formula = NULL)) expect_equal(nrow(em), 2) em <- SW(summary(emmeans(fit2, "Age", nlpar = "a", re_formula = NULL))) expect_equal(nrow(em), 1) }) brms/tests/testthat/tests.families.R0000644000175000017500000000651013737534026017442 0ustar nileshnileshcontext("Tests for family functions") test_that("family functions returns expected results", { expect_equal(student(identity)$link, "identity") expect_equal(student()$link, "identity") expect_error(student("logit"), "student") expect_equal(bernoulli(logit)$link, "logit") expect_error(bernoulli("sqrt"), "bernoulli") expect_equal(negbinomial(sqrt)$link, "sqrt") expect_error(negbinomial(inverse), "inverse") expect_equal(geometric(identity)$link, "identity") expect_error(geometric("inv"), "geometric") expect_equal(exponential(log)$link, "log") expect_error(exponential("cloglog"), "exponential") expect_equal(weibull()$family, "weibull") expect_error(weibull(sqrt), "weibull") expect_equal(Beta("probit")$link, "probit") expect_error(Beta(log), "beta") expect_equal(hurdle_poisson()$link, "log") expect_equal(hurdle_negbinomial(log)$link, "log") expect_error(hurdle_negbinomial("inverse"), "hurdle_negbinomial") expect_equal(hurdle_gamma()$family, "hurdle_gamma") expect_error(hurdle_gamma(sqrt), "hurdle_gamma") expect_equal(zero_inflated_poisson(log)$link, "log") expect_error(zero_inflated_poisson(list(1)), "zero_inflated_poisson") expect_equal(zero_inflated_negbinomial("log")$link, "log") expect_error(zero_inflated_negbinomial("logit"), "zero_inflated_negbinomial") expect_equal(zero_inflated_beta(logit)$family, "zero_inflated_beta") expect_equivalent(zero_inflated_binomial()$link_zi, "logit") expect_error(zero_inflated_binomial(y~x), "zero_inflated_binomial") expect_equal(categorical()$link, "logit") expect_error(categorical(probit), "probit") expect_equal(cumulative(cauchit)$family, "cumulative") expect_equal(sratio(probit_approx)$link, "probit_approx") expect_equal(cratio("cloglog")$family, "cratio") expect_equal(acat(cloglog)$link, "cloglog") expect_equal(brmsfamily("gaussian", inverse)$link, "inverse") expect_equal(brmsfamily("geometric", "identity")$family, "geometric") 
expect_equal(brmsfamily("zi_poisson")$link_zi, "logit") expect_error(weibull(link_shape = "logit"), "'logit' is not a supported link for parameter 'shape'") expect_error(weibull(link_shape = c("log", "logit")), "Cannot coerce 'alink' to a single character value") }) test_that("print brmsfamily works correctly", { expect_output(print(weibull()), "Family: weibull \nLink function: log") }) test_that("mixture returns expected results and errors", { mix <- mixture(gaussian, nmix = 3) expect_equal(brms:::family_names(mix), rep("gaussian", 3)) mix <- mixture(gaussian, student, weibull, nmix = 3:1) expect_equal( brms:::family_names(mix), c(rep("gaussian", 3), rep("student", 2), "weibull") ) expect_error(mixture(gaussian, "x"), "x is not a supported family") expect_error(mixture(poisson(), categorical()), "Some of the families are not allowed in mixture models") expect_error(mixture(poisson, "cumulative"), "Cannot mix ordinal and non-ordinal families") expect_error(mixture(lognormal, exgaussian, poisson()), "Cannot mix families with real and integer support") expect_error(mixture(lognormal), "Expecting at least 2 mixture components") expect_error(mixture(poisson, binomial, order = "x"), "Argument 'order' is invalid") }) brms/tests/testthat/tests.distributions.R0000644000175000017500000005477214111751667020567 0ustar nileshnileshcontext("Tests for distribution functions") test_that("student distribution works correctly", { expect_equal(integrate(dstudent_t, -100, 100, df = 15, mu = 10, sigma = 5)$value, 1) expect_equal(dstudent_t(1, df = 10, mu = 0, sigma = 5), dt(1/5, df = 10)/5) expect_equal(pstudent_t(2, df = 20, mu = 2, sigma = 0.4), pt(0, df = 20)) expect_equal(qstudent_t(0.7, df = 5, mu = 2, sigma = 3), 2 + 3*qt(0.7, df = 5)) expect_equal(length(rstudent_t(10, df = 10, mu = rnorm(10), sigma = 1:10)), 10) }) test_that("multivariate normal and student distributions work correctly", { mu <- rnorm(3) Sigma <- cov(matrix(rnorm(300), ncol = 3)) expect_equal(dmulti_normal(1:3, mu = mu, Sigma = Sigma), mnormt::dmnorm(1:3, mu, Sigma)) expect_equal(dmulti_student_t(1:3, mu = mu, Sigma = Sigma, df = 10, log = TRUE), mnormt::dmt(1:3, df = 10, mean = mu, S = Sigma, log = TRUE)) expect_equal(dim(rmulti_normal(7, mu = mu, Sigma = Sigma)), c(7, 3)) expect_equal(dim(rmulti_student_t(7, mu = mu, Sigma = Sigma, df = 10)), c(7, 3)) # test errors expect_error(dmulti_normal(1:3, mu = rnorm(2), Sigma = Sigma, check = TRUE), "Dimension of mu is incorrect") expect_error(dmulti_normal(1:3, mu = mu, Sigma = Sigma[1:2, 1:2], check = TRUE), "Dimension of Sigma is incorrect") expect_error(dmulti_normal(1:3, mu = mu, Sigma = Sigma[1:3, 3:1], check = TRUE), "Sigma must be a symmetric matrix") expect_error(rmulti_normal(1.5, mu = mu, Sigma = Sigma, check = TRUE), "n must be a positive integer") expect_error(rmulti_normal(10, mu = mu, Sigma = Sigma[1:3, 3:1], check = TRUE), "Sigma must be a symmetric matrix") expect_error(dmulti_student_t(rnorm(3), mu = mu, Sigma = Sigma, df = -1, check = TRUE), "df must be greater than 0") expect_error(dmulti_student_t(rnorm(3), mu = mu, Sigma = Sigma[1:3, 3:1], df = 30, check = TRUE), "Sigma must be a symmetric matrix") expect_error(rmulti_student_t(10, mu = mu, Sigma = Sigma, df = -1, check = TRUE), "df must be greater than 0") }) test_that("von_mises distribution functions run without errors", { n <- 10 res <- dvon_mises(runif(n, -pi, pi), mu = 1, kappa = 1:n) expect_true(length(res) == n) res <- pvon_mises(runif(n, -pi, pi), mu = rnorm(n), kappa = 0:(n-1)) expect_true(length(res) == n) 
res <- rvon_mises(n, mu = rnorm(n), kappa = 0:(n-1)) expect_true(length(res) == n) }) test_that("skew_normal distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dskew_normal(x, mu = 1, sigma = 2, alpha = 1) expect_true(length(res) == n) res <- pskew_normal(x, mu = rnorm(n), sigma = 1:n, alpha = 3, log.p = TRUE) expect_true(length(res) == n) res <- qskew_normal(x, mu = rnorm(n), sigma = 1:n, alpha = 3, log.p = TRUE) expect_true(length(res) == n) res <- rskew_normal(n, mu = rnorm(n), sigma = 10, alpha = -4:5) expect_true(length(res) == n) }) test_that("exgaussian distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dexgaussian(x, mu = 1, sigma = 2, beta = 1) expect_true(length(res) == n) res <- pexgaussian(x, mu = rnorm(n), sigma = 1:n, beta = 3, log.p = TRUE) expect_true(length(res) == n) res <- rexgaussian(n, mu = rnorm(n), sigma = 10, beta = 1:10) expect_true(length(res) == n) }) test_that("frechet distribution functions run without errors", { n <- 10 x <- 21:30 res <- dfrechet(x, loc = 1, scale = 2, shape = 1, log = TRUE) expect_true(length(res) == n) loc <- 1:10 res <- pfrechet(x, loc = loc, scale = 1:n, shape = 3) expect_true(length(res) == n) q <- qfrechet(res, loc = loc, scale = 1:n, shape = 3) expect_equal(x, q) res <- rfrechet(n, loc = loc, scale = 10, shape = 1:10) expect_true(length(res) == n) }) test_that("inv_gaussian distribution functions run without errors", { n <- 10 x <- rgamma(n, 10, 3) res <- dinv_gaussian(x, mu = 1, shape = 1) expect_true(length(res) == n) res <- pinv_gaussian(x, mu = abs(rnorm(n)), shape = 3) expect_true(length(res) == n) res <- rinv_gaussian(n, mu = abs(rnorm(n)), shape = 1:10) expect_true(length(res) == n) }) test_that("gen_extreme_value distribution functions run without errors", { n <- 10 x <- rgamma(n, 10, 3) res <- dgen_extreme_value(x, mu = 1, sigma = 2, xi = 1) expect_true(length(res) == n) res <- pgen_extreme_value(x, mu = rnorm(n), sigma = 1:n, xi = 3) expect_true(length(res) == n) res <- rgen_extreme_value(n, mu = rnorm(n), sigma = 10, xi = 1:10) expect_true(length(res) == n) }) test_that("asym_laplace distribution functions run without errors", { n <- 10 x <- rnorm(n, 10, 3) res <- dasym_laplace(x, mu = 1, sigma = 2, quantile = 0.5) expect_true(length(res) == n) res <- pasym_laplace(x, mu = rnorm(n), sigma = 1:n, quantile = 0.3) expect_true(length(res) == n) res <- rasym_laplace(n, mu = rnorm(n), sigma = 10, quantile = runif(n, 0, 1)) expect_true(length(res) == n) }) test_that("zero-inflated distribution functions run without errors", { n <- 10 x <- rpois(n, lambda = 1) res <- dzero_inflated_poisson(x, lambda = 1, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_poisson(x, lambda = 1, zi = 0.1) expect_true(length(res) == n) res <- dzero_inflated_negbinomial(x, mu = 2, shape = 5, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_negbinomial(x, mu = 2, shape = 5, zi = 0.1) expect_true(length(res) == n) res <- dzero_inflated_binomial(x, size = c(2, 10), prob = 0.4, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_binomial(x, size = c(2, 10), prob = 0.4, zi = 0.1) expect_true(length(res) == n) x <- c(rbeta(n - 2, shape1 = 2, shape2 = 3), 0, 0) res <- dzero_inflated_beta(x, shape1 = 2, shape2 = 3, zi = 0.1) expect_true(length(res) == n) res <- pzero_inflated_beta(x, shape1 = 2, shape2 = 3, zi = 0.1) expect_true(length(res) == n) }) test_that("hurdle distribution functions run without errors", { n <- 10 x <- rpois(n, lambda = 1) res <- 
dhurdle_poisson(x, lambda = 1, hu = 0.1) expect_true(length(res) == n) res <- phurdle_poisson(x, lambda = 1, hu = 0.1) expect_true(length(res) == n) res <- dhurdle_negbinomial(x, mu = 2, shape = 5, hu = 0.1) expect_true(length(res) == n) res <- phurdle_negbinomial(x, mu = 2, shape = 5, hu = 0.1) expect_true(length(res) == n) res <- dhurdle_gamma(x, shape = 1, scale = 3, hu = 0.1) expect_true(length(res) == n) res <- phurdle_gamma(x, shape = 1, scale = 3, hu = 0.1) expect_true(length(res) == n) res <- dhurdle_lognormal(x, mu = 2, sigma = 5, hu = 0.1) expect_true(length(res) == n) res <- phurdle_lognormal(x, mu = 2, sigma = 5, hu = 0.1) expect_true(length(res) == n) }) test_that("wiener distribution functions run without errors", { set.seed(1234) n <- 10 x <- seq(0.1, 1, 0.1) alpha <- rexp(n) tau <- 0.05 beta <- 0.5 delta <- rnorm(n) resp <- sample(c(0, 1), n, TRUE) d1 <- dwiener(x, alpha, tau, beta, delta, resp, backend = "Rwiener") d2 <- dwiener(x, alpha, tau, beta, delta, resp, backend = "rtdists") expect_equal(d1, d2) r1 <- rwiener(n, alpha, tau, beta, delta, backend = "Rwiener") r2 <- rwiener(n, alpha, tau, beta, delta, backend = "rtdists") expect_equal(names(r1), names(r2)) expect_equal(dim(r1), dim(r2)) }) test_that("d() works correctly", { source(testthat::test_path(file.path("helpers", "inv_link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { thres_test <- matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) # Emulate no category-specific effects (i.e., only a single vector of # linear predictors) as well as category-specific effects (i.e., a matrix # of linear predictors): eta_test_list <- list( rnorm(ndraws), matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) ) for (eta_test in eta_test_list) { thres_eta <- if (is.matrix(eta_test)) { stopifnot(identical(dim(eta_test), dim(thres_test))) thres_test - eta_test } else { # Just to try something different: sweep(thres_test, 1, as.array(eta_test)) } eta_thres <- if (is.matrix(eta_test)) { stopifnot(identical(dim(eta_test), dim(thres_test))) eta_test - thres_test } else { # Just to try something different: sweep(-thres_test, 1, as.array(eta_test), FUN = "+") } for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): d_cumul <- dcumulative(seq_len(ncat), eta_test, thres_test, link = link) d_cumul_ch <- inv_link_cumulative_ch(thres_eta, link = link) expect_equivalent(d_cumul, d_cumul_ch) expect_equal(dim(d_cumul), c(ndraws, ncat)) # sratio(): d_sratio <- dsratio(seq_len(ncat), eta_test, thres_test, link = link) d_sratio_ch <- inv_link_sratio_ch(thres_eta, link = link) expect_equivalent(d_sratio, d_sratio_ch) expect_equal(dim(d_sratio), c(ndraws, ncat)) # cratio(): d_cratio <- dcratio(seq_len(ncat), eta_test, thres_test, link = link) d_cratio_ch <- inv_link_cratio_ch(eta_thres, link = link) expect_equivalent(d_cratio, d_cratio_ch) expect_equal(dim(d_cratio), c(ndraws, ncat)) # acat(): d_acat <- dacat(seq_len(ncat), eta_test, thres_test, link = link) d_acat_ch <- inv_link_acat_ch(eta_thres, link = link) expect_equivalent(d_acat, d_acat_ch) expect_equal(dim(d_acat), c(ndraws, ncat)) } } } } }) test_that("inv_link_() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "inv_link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = 
c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): il_cumul <- inv_link_cumulative(x_test, link = link) il_cumul_ch <- inv_link_cumulative_ch(x_test, link = link) expect_equivalent(il_cumul, il_cumul_ch) expect_equal(dim(il_cumul), c(ndraws, nobsv, ncat)) # sratio(): il_sratio <- inv_link_sratio(x_test, link = link) il_sratio_ch <- inv_link_sratio_ch(x_test, link = link) expect_equivalent(il_sratio, il_sratio_ch) expect_equal(dim(il_sratio), c(ndraws, nobsv, ncat)) # cratio(): il_cratio <- inv_link_cratio(nx_test, link = link) il_cratio_ch <- inv_link_cratio_ch(nx_test, link = link) expect_equivalent(il_cratio, il_cratio_ch) expect_equal(dim(il_cratio), c(ndraws, nobsv, ncat)) # acat(): il_acat <- inv_link_acat(nx_test, link = link) il_acat_ch <- inv_link_acat_ch(nx_test, link = link) expect_equivalent(il_acat, il_acat_ch) expect_equal(dim(il_acat), c(ndraws, nobsv, ncat)) } } } } }) test_that("link_() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "link_ordinal_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): l_cumul <- link_cumulative(x_test, link = link) l_cumul_ch <- link_cumulative_ch(x_test, link = link) expect_equivalent(l_cumul, l_cumul_ch) expect_equal(dim(l_cumul), c(ndraws, nobsv, ncat - 1)) # sratio(): l_sratio <- link_sratio(x_test, link = link) l_sratio_ch <- link_sratio_ch(x_test, link = link) expect_equivalent(l_sratio, l_sratio_ch) expect_equal(dim(l_sratio), c(ndraws, nobsv, ncat - 1)) # cratio(): l_cratio <- link_cratio(x_test, link = link) l_cratio_ch <- link_cratio_ch(x_test, link = link) expect_equivalent(l_cratio, l_cratio_ch) expect_equal(dim(l_cratio), c(ndraws, nobsv, ncat - 1)) # acat(): l_acat <- link_acat(x_test, link = link) l_acat_ch <- link_acat_ch(x_test, link = link) expect_equivalent(l_acat, l_acat_ch) expect_equal(dim(l_acat), c(ndraws, nobsv, ncat - 1)) } } } } }) test_that("inv_link_() inverts link_()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): l_cumul <- link_cumulative(x_test, link = link) il_cumul <- inv_link_cumulative(l_cumul, link = link) expect_equivalent(il_cumul, x_test) # sratio(): l_sratio <- link_sratio(x_test, link = link) il_sratio <- inv_link_sratio(l_sratio, link = link) expect_equivalent(il_sratio, x_test) # cratio(): l_cratio <- link_cratio(x_test, link = link) il_cratio <- inv_link_cratio(l_cratio, link = link) expect_equivalent(il_cratio, x_test) # acat(): l_acat <- link_acat(x_test, link = link) il_acat <- inv_link_acat(l_acat, link = link) expect_equivalent(il_acat, x_test) } } } } }) test_that("link_() inverts inv_link_()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { # cumulative(): il_cumul <- 
inv_link_cumulative(x_test, link = link) l_cumul <- link_cumulative(il_cumul, link = link) expect_equivalent(l_cumul, x_test) # sratio(): il_sratio <- inv_link_sratio(x_test, link = link) l_sratio <- link_sratio(il_sratio, link = link) expect_equivalent(l_sratio, x_test) # cratio(): il_cratio <- inv_link_cratio(x_test, link = link) l_cratio <- link_cratio(il_cratio, link = link) expect_equivalent(l_cratio, x_test) # acat(): il_acat <- inv_link_acat(x_test, link = link) l_acat <- link_acat(il_acat, link = link) expect_equivalent(l_acat, x_test) } } } } }) test_that(paste( "dsratio() and dcratio() give the same results for symmetric distribution", "functions" ), { source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { thres_test <- matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) # Emulate no category-specific effects (i.e., only a single vector of # linear predictors) as well as category-specific effects (i.e., a matrix # of linear predictors): eta_test_list <- list( rnorm(ndraws), matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws) ) for (eta_test in eta_test_list) { for (link in c("logit", "probit", "cauchit", "cloglog")) { d_sratio <- dsratio(seq_len(ncat), eta_test, thres_test, link = link) d_cratio <- dcratio(seq_len(ncat), eta_test, thres_test, link = link) if (link != "cloglog") { expect_equal(d_sratio, d_cratio) } else { expect_false(isTRUE(all.equal(d_sratio, d_cratio))) } } } } } }) test_that(paste( "inv_link_sratio() and inv_link_cratio() applied to arrays give the same", "results for symmetric distribution functions (when respecting the sign", "appropriately)." ), { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) nx_test <- -x_test for (link in c("logit", "probit", "cauchit", "cloglog")) { il_sratio <- inv_link_sratio(x_test, link = link) il_cratio <- inv_link_cratio(nx_test, link = link) if (link != "cloglog") { expect_equal(il_sratio, il_cratio) } else { expect_false(isTRUE(all.equal(il_sratio, il_cratio))) } } } } } }) test_that(paste( "link_sratio() and link_cratio() applied to arrays give the same", "results for symmetric distribution functions (when respecting the sign", "appropriately)." 
), { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) for (link in c("logit", "probit", "cauchit", "cloglog")) { l_sratio <- link_sratio(x_test, link = link) l_cratio <- link_cratio(x_test, link = link) if (link != "cloglog") { expect_equal(l_sratio, -l_cratio) } else { expect_false(isTRUE(all.equal(l_sratio, -l_cratio))) } } } } } }) test_that("dcategorical() works correctly", { source(testthat::test_path(file.path("helpers", "inv_link_categorical_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike_oneobs.R"))) for (ndraws in ndraws_vec) { for (ncat in ncat_vec) { eta_test_list <- list(cbind(0, matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws))) if (ndraws == 1) { eta_test_list <- c(eta_test_list, list(c(0, rnorm(ncat - 1)))) } for (eta_test in eta_test_list) { d_categorical <- dcategorical(seq_len(ncat), eta_test) d_categorical_ch <- inv_link_categorical_ch(eta_test) expect_equivalent(d_categorical, d_categorical_ch) expect_equal(dim(d_categorical), c(ndraws, ncat)) } } } }) test_that("inv_link_categorical() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "inv_link_categorical_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) zeros_arr <- array(0, dim = c(ndraws, nobsv, 1)) x_test <- abind::abind(zeros_arr, x_test) il_categorical <- inv_link_categorical(x_test) il_categorical_ch <- inv_link_categorical_ch(x_test) expect_equivalent(il_categorical, il_categorical_ch) expect_equal(dim(il_categorical), c(ndraws, nobsv, ncat)) } } } }) test_that("link_categorical() works correctly for arrays", { source(testthat::test_path(file.path("helpers", "link_categorical_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) l_categorical <- link_categorical(x_test) l_categorical_ch <- link_categorical_ch(x_test) expect_equivalent(l_categorical, l_categorical_ch) expect_equal(dim(l_categorical), c(ndraws, nobsv, ncat)) } } } }) test_that("inv_link_categorical() inverts link_categorical()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rdirichlet(ndraws * nobsv, alpha = rep(1, ncat)), dim = c(ndraws, nobsv, ncat)) l_categorical <- link_categorical(x_test) il_categorical <- inv_link_categorical(l_categorical) expect_equivalent(il_categorical, x_test) } } } }) test_that("link_categorical() inverts inv_link_categorical()", { source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { x_test <- array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1)) zeros_arr <- array(0, dim = c(ndraws, nobsv, 1)) x_test <- abind::abind(zeros_arr, x_test) il_categorical <- inv_link_categorical(x_test) l_categorical <- link_categorical(il_categorical) expect_equivalent(l_categorical, x_test) } } } }) 
brms/tests/testthat/tests.make_stancode.R0000644000175000017500000031015214135237456020446 0ustar nileshnileshcontext("Tests for make_stancode") # simplifies manual calling of tests expect_match2 <- brms:::expect_match2 SW <- brms:::SW # parsing the Stan code ensures syntactial correctness of models # setting this option to FALSE speeds up testing not_cran <- identical(Sys.getenv("NOT_CRAN"), "true") options(brms.parse_stancode = not_cran, brms.backend = "rstan") test_that("specified priors appear in the Stan code", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), g = rep(1:5, 2), h = factor(rep(1:5, each = 2))) prior <- c(prior(std_normal(), coef = x1), prior(normal(0,2), coef = x2), prior(normal(0,5), Intercept), prior(cauchy(0,1), sd, group = g), prior(cauchy(0,2), sd, group = g, coef = x1), prior(gamma(1, 1), class = sd, group = h)) scode <- make_stancode(y ~ x1*x2 + (x1*x2|g) + (1 | h), dat, prior = prior, sample_prior = "yes") expect_match2(scode, "target += std_normal_lpdf(b[1])") expect_match2(scode, "target += normal_lpdf(b[2] | 0, 2)") expect_match2(scode, "target += normal_lpdf(Intercept | 0, 5)") expect_match2(scode, "target += cauchy_lpdf(sd_1[1] | 0, 1)") expect_match2(scode, "- 1 * cauchy_lccdf(0 | 0, 1)") expect_match2(scode, "target += cauchy_lpdf(sd_1[2] | 0, 2)") expect_match2(scode, "target += student_t_lpdf(sigma | 3, 0, 3.7)") expect_match2(scode, "- 1 * student_t_lccdf(0 | 3, 0, 3.7)") expect_match2(scode, "target += gamma_lpdf(sd_2 | 1, 1)") expect_match2(scode, "prior_b_1 = normal_rng(0,1);") expect_match2(scode, "prior_sd_1_1 = cauchy_rng(0,1)") expect_match2(scode, "while (prior_sd_1_1 < 0)") expect_match2(scode, "prior_sd_2 = gamma_rng(1,1)") expect_match2(scode, "while (prior_sd_2 < 0)") prior <- c(prior(lkj(0.5), class = cor, group = g), prior(normal(0, 1), class = b), prior(normal(0, 5), class = Intercept), prior(cauchy(0, 5), class = sd)) scode <- make_stancode(y ~ x1 + cs(x2) + (0 + x1 + x2 | g), data = dat, family = acat(), prior = prior, sample_prior = TRUE) expect_match2(scode, "target += normal_lpdf(b | 0, 1)") expect_match2(scode, "target += normal_lpdf(Intercept | 0, 5)") expect_match2(scode, "target += cauchy_lpdf(sd_1 | 0, 5)") expect_match2(scode, "target += lkj_corr_cholesky_lpdf(L_1 | 0.5)") expect_match2(scode, "target += normal_lpdf(to_vector(bcs) | 0, 1)") expect_match2(scode, "prior_bcs = normal_rng(0,1)") prior <- c(prior(normal(0,5), nlpar = a), prior(normal(0,10), nlpar = b), prior(cauchy(0,1), class = sd, nlpar = a), prior(lkj(2), class = cor, group = g)) scode <- make_stancode( bf(y ~ a * exp(-b * x1), a + b ~ (1|ID|g), nl = TRUE), data = dat, prior = prior, sample_prior = TRUE ) expect_match2(scode, "target += normal_lpdf(b_a | 0, 5)") expect_match2(scode, "target += normal_lpdf(b_b | 0, 10)") expect_match2(scode, "target += cauchy_lpdf(sd_1[1] | 0, 1)") expect_match2(scode, "target += lkj_corr_cholesky_lpdf(L_1 | 2)") expect_match2(scode, "prior_b_a = normal_rng(0,5)") expect_match2(scode, "prior_sd_1_2 = student_t_rng(3,0,3.7)") expect_match2(scode, "prior_cor_1 = lkj_corr_rng(M_1,2)[1, 2]") prior <- c(prior(lkj(2), rescor), prior(cauchy(0, 5), sigma, resp = y), prior(cauchy(0, 1), sigma, resp = x1)) form <- bf(mvbind(y, x1) ~ x2) + set_rescor(TRUE) scode <- make_stancode(form, dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "target += lkj_corr_cholesky_lpdf(Lrescor | 2)") expect_match2(scode, "prior_sigma_y = cauchy_rng(0,5)") expect_match2(scode, "prior_rescor = lkj_corr_rng(nresp,2)[1, 2]") prior <- 
c(prior(uniform(-1, 1), ar), prior(normal(0, 0.5), ma), prior(normal(0, 5))) scode <- make_stancode(y ~ mo(g) + arma(cov = TRUE), dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "vector[Kar] ar;") expect_match2(scode, "vector[Kma] ma;") expect_match2(scode, "target += uniform_lpdf(ar | -1, 1)") expect_match2(scode, "target += normal_lpdf(ma | 0, 0.5)") expect_match2(scode, "- 1 * log_diff_exp(normal_lcdf(1 | 0, 0.5), normal_lcdf(-1 | 0, 0.5))" ) expect_match2(scode, "target += normal_lpdf(bsp | 0, 5)") expect_match2(scode, "target += dirichlet_lpdf(simo_1 | con_simo_1)") expect_match2(scode, "prior_simo_1 = dirichlet_rng(con_simo_1)") expect_match2(scode, "prior_ar = uniform_rng(-1,1)") expect_match2(scode, "while (prior_ar < -1 || prior_ar > 1)") # test for problem described in #213 prior <- c(prior(normal(0, 1), coef = x1), prior(normal(0, 2), coef = x1, dpar = sigma)) scode <- make_stancode(bf(y ~ x1, sigma ~ x1), dat, prior = prior) expect_match2(scode, "target += normal_lpdf(b[1] | 0, 1);") expect_match2(scode, "target += normal_lpdf(b_sigma[1] | 0, 2);") prior <- c(set_prior("target += normal_lpdf(b[1] | 0, 1)", check = FALSE), set_prior("", class = "sigma")) scode <- make_stancode(y ~ x1, dat, prior = prior, sample_prior = TRUE) expect_match2(scode, "target += normal_lpdf(b[1] | 0, 1)") expect_true(!grepl("sigma \\|", scode)) prior <- prior(gamma(0, 1), coef = x1) expect_warning(make_stancode(y ~ x1, dat, prior = prior), "no natural lower bound") prior <- prior(uniform(0,5), class = sd) expect_warning(make_stancode(y ~ x1 + (1|g), dat, prior = prior), "no natural upper bound") prior <- prior(uniform(-1, 1), class = cor) expect_error( make_stancode(y ~ x1 + (x1|g), dat, prior = prior), "prior for correlation matrices is the 'lkj' prior" ) }) test_that("special shrinkage priors appear in the Stan code", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10)) # horseshoe prior hs <- horseshoe(7, scale_global = 2, df_global = 3, df_slab = 6, scale_slab = 3) scode <- make_stancode(y ~ x1*x2, data = dat, prior = set_prior(hs), sample_prior = TRUE) expect_match2(scode, "vector[Kc] hs_local;") expect_match2(scode, "real hs_global;") expect_match2(scode, "target += student_t_lpdf(hs_local | hs_df, 0, 1)" ) expect_match2(scode, "target += student_t_lpdf(hs_global | hs_df_global, 0, hs_scale_global * sigma)" ) expect_match2(scode, "target += inv_gamma_lpdf(hs_slab | 0.5 * hs_df_slab, 0.5 * hs_df_slab)" ) expect_match2(scode, "b = horseshoe(zb, hs_local, hs_global, hs_scale_slab^2 * hs_slab);" ) scode <- make_stancode(y ~ x1*x2, data = dat, poisson(), prior = prior(horseshoe(scale_global = 3))) expect_match2(scode, "b = horseshoe(zb, hs_local, hs_global, hs_scale_slab^2 * hs_slab);" ) scode <- make_stancode(x1 ~ mo(y), dat, prior = prior(horseshoe())) expect_match2(scode, "target += std_normal_lpdf(zbsp);") expect_match2(scode, "target += student_t_lpdf(hs_localsp | hs_df, 0, 1)" ) expect_match2(scode, paste0( "bsp = horseshoe(zbsp, hs_localsp, hs_global, hs_scale_slab^2 * hs_slab);" ) ) # R2D2 prior scode <- make_stancode(y ~ x1*x2, data = dat, prior = prior(R2D2(0.5, 10)), sample_prior = TRUE) expect_match2(scode, "b = R2D2(zb, R2D2_phi, R2D2_tau2);") expect_match2(scode, "target += dirichlet_lpdf(R2D2_phi | R2D2_cons_D2);") expect_match2(scode, "target += beta_lpdf(R2D2_R2 | R2D2_mean_R2 * R2D2_prec_R2, (1 - R2D2_mean_R2) * R2D2_prec_R2);") expect_match2(scode, "R2D2_tau2 = sigma^2 * R2D2_R2 / (1 - R2D2_R2);") # lasso prior scode <- make_stancode(y ~ x1*x2, data = dat, 
prior = prior(lasso(2, scale = 10)), sample_prior = TRUE) expect_match2(scode, "target += chi_square_lpdf(lasso_inv_lambda | lasso_df);") expect_match2(scode, "target += double_exponential_lpdf(b | 0, lasso_scale * lasso_inv_lambda);" ) scode <- make_stancode(x1 ~ mo(y), dat, prior = prior(lasso())) expect_match2(scode, "double_exponential_lpdf(bsp | 0, lasso_scale * lasso_inv_lambda)" ) # horseshoe and lasso prior applied in a non-linear model hs_a1 <- horseshoe(7, scale_global = 2, df_global = 3) lasso_a2 <- lasso(2, scale = 10) R2D2_a3 <- R2D2(0.5, 10) scode <- make_stancode( bf(y ~ a1 + a2 + a3, a1 ~ x1, a2 ~ 0 + x2, a3 ~ x2, nl = TRUE), data = dat, sample_prior = TRUE, prior = c(set_prior(hs_a1, nlpar = "a1"), set_prior(lasso_a2, nlpar = "a2"), set_prior(R2D2_a3, nlpar = "a3")) ) expect_match2(scode, "vector[K_a1] hs_local_a1;") expect_match2(scode, "real hs_global_a1;") expect_match2(scode, "target += student_t_lpdf(hs_local_a1 | hs_df_a1, 0, 1)" ) expect_match2(scode, "target += student_t_lpdf(hs_global_a1 | hs_df_global_a1, 0, hs_scale_global_a1 * sigma)" ) expect_match2(scode, "target += inv_gamma_lpdf(hs_slab_a1 | 0.5 * hs_df_slab_a1, 0.5 * hs_df_slab_a1)" ) expect_match2(scode, "b_a1 = horseshoe(zb_a1, hs_local_a1, hs_global_a1, hs_scale_slab_a1^2 * hs_slab_a1);" ) expect_match2(scode, "target += chi_square_lpdf(lasso_inv_lambda_a2 | lasso_df_a2);" ) expect_match2(scode, "target += double_exponential_lpdf(b_a2 | 0, lasso_scale_a2 * lasso_inv_lambda_a2);" ) expect_match2(scode, "b_a3 = R2D2(zb_a3, R2D2_phi_a3, R2D2_tau2_a3);") # check error messages expect_error(make_stancode(y ~ x1*x2, data = dat, prior = prior(horseshoe(-1))), "Degrees of freedom of the local priors") expect_error(make_stancode(y ~ x1*x2, data = dat, prior = prior(horseshoe(1, -1))), "Scale of the global prior") expect_error(make_stancode(y ~ x1*x2, data = dat, prior = prior(lasso(-1))), "Degrees of freedom of the shrinkage parameter prior") expect_error(make_stancode(y ~ cs(x1), dat, acat(), prior = prior(lasso())), "Special priors are not yet allowed") bprior <- prior(horseshoe()) + prior(normal(0, 1), coef = "y") expect_error(make_stancode(x1 ~ y, dat, prior = bprior), "Defining separate priors for single coefficients") expect_error(make_stancode(x1 ~ y, dat, prior = prior(lasso(), lb = 0)), "Setting boundaries on coefficients is not allowed") }) test_that("priors can be fixed to constants", { dat <- data.frame(y = 1:12, x1 = rnorm(12), x2 = rnorm(12), g = rep(1:6, each = 2), h = factor(rep(1:2, each = 6))) prior <- prior(normal(0, 1), b) + prior(constant(3), b, coef = x1) + prior(constant(-1), b, coef = x2) + prior(constant(10), Intercept) + prior(normal(0, 5), sd) + prior(constant(1), sd, group = g, coef = x2) + prior(constant(2), sd, group = g, coef = x1) + prior(constant(0.3), sigma) scode <- make_stancode(y ~ x1*x2 + (x1*x2 | g), dat, prior = prior) expect_match2(scode, "b[1] = 3;") expect_match2(scode, "b[2] = -1;") expect_match2(scode, "b[3] = par_b_3;") expect_match2(scode, "target += normal_lpdf(b[3] | 0, 1);") expect_match2(scode, "Intercept = 1") expect_match2(scode, "sd_1[3] = 1;") expect_match2(scode, "sd_1[2] = 2;") expect_match2(scode, "sd_1[4] = par_sd_1_4;") expect_match2(scode, "target += normal_lpdf(sd_1[4] | 0, 5)") expect_match2(scode, "sigma = 0.3;") prior <- prior(constant(3)) scode <- make_stancode(y ~ x2 + x1 + cs(g), dat, family = sratio(), prior = prior) expect_match2(scode, "b = rep_vector(3, rows(b));") expect_match2(scode, "bcs = rep_matrix(3, rows(bcs), cols(bcs));") prior <- 
prior(normal(0, 3)) + prior(constant(3), coef = x1) + prior(constant(-1), coef = g) scode <- make_stancode(y ~ x1 + cs(x2) + cs(g), dat, family = sratio(), prior = prior) expect_match2(scode, "b[1] = 3;") expect_match2(scode, "bcs[1] = par_bcs_1;") expect_match2(scode, "target += normal_lpdf(bcs[1] | 0, 3);") expect_match2(scode, "bcs[2] = rep_row_vector(-1, cols(bcs[2]));") prior <- prior(constant(3), class = "sd", group = "g") + prior("constant([[1, 0], [0, 1]])", class = "cor") scode <- make_stancode(y ~ x1 + (x1 | gr(g, by = h)), dat, prior = prior) expect_match2(scode, "sd_1 = rep_matrix(3, rows(sd_1), cols(sd_1));") expect_match2(scode, "L_1[2] = [[1, 0], [0, 1]];") prior <- prior(constant(0.5), class = lscale, coef = gpx1h1) + prior(normal(0, 10), class = lscale, coef = gpx1h2) scode <- make_stancode(y ~ gp(x1, by = h), dat, prior = prior) expect_match2(scode, "lscale_1[1][1] = 0.5;") expect_match2(scode, "lscale_1[2][1] = par_lscale_1_2_1;") expect_match2(scode, "target += normal_lpdf(lscale_1[2][1] | 0, 10)") # test that improper base priors are correctly recognized (#919) prior <- prior(constant(-1), b, coef = x2) scode <- make_stancode(y ~ x1*x2, dat, prior = prior) expect_match2(scode, "real par_b_1;") expect_match2(scode, "b[3] = par_b_3;") # test error messages prior <- prior(normal(0, 1), Intercept) + prior(constant(3), Intercept, coef = 2) expect_error( make_stancode(y ~ x1, data = dat, family = cumulative(), prior = prior), "Can either estimate or fix all values" ) }) test_that("link functions appear in the Stan code", { dat <- data.frame(y = 1:10, x = rnorm(10)) expect_match2(make_stancode(y ~ s(x), dat, family = poisson()), "target += poisson_log_lpmf(Y | mu);") expect_match2(make_stancode(mvbind(y, y + 1) ~ x, dat, family = skew_normal("log")), "mu_y[n] = exp(mu_y[n]);") expect_match2(make_stancode(y ~ x, dat, family = von_mises(tan_half)), "mu[n] = inv_tan_half(mu[n]);") expect_match2(make_stancode(y ~ x, dat, family = weibull()), "mu[n] = exp(mu[n]) / tgamma(1 + 1 / shape);") expect_match2(make_stancode(y ~ x, dat, family = exponential("identity")), "mu[n] = inv(mu[n]);") expect_match2(make_stancode(y ~ x, dat, family = poisson("sqrt")), "mu[n] = square(mu[n]);") expect_match2(make_stancode(y ~ s(x), dat, family = bernoulli()), "target += bernoulli_logit_lpmf(Y | mu);") }) test_that("Stan GLM primitives are applied correctly", { dat <- data.frame(x = rnorm(10), y = 1:10) scode <- make_stancode(y ~ x, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y | Xc, Intercept, b, sigma)") scode <- make_stancode(y ~ x, dat, family = bernoulli) expect_match2(scode, "bernoulli_logit_glm_lpmf(Y | Xc, Intercept, b)") scode <- make_stancode(y ~ x, dat, family = poisson) expect_match2(scode, "poisson_log_glm_lpmf(Y | Xc, Intercept, b)") scode <- make_stancode(y ~ x, dat, family = negbinomial) expect_match2(scode, "neg_binomial_2_log_glm_lpmf(Y | Xc, Intercept, b, shape)" ) scode <- make_stancode(y ~ x, dat, family = brmsfamily("negbinomial2")) expect_match2(scode, "neg_binomial_2_log_glm_lpmf(Y | Xc, Intercept, b, inv(sigma))" ) scode <- make_stancode(y ~ 0 + x, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y | X, 0, b, sigma)") bform <- bf(y ~ x) + bf(x ~ 1, family = negbinomial()) + set_rescor(FALSE) scode <- make_stancode(bform, dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y_y | Xc_y, Intercept_y, b_y, sigma_y)" ) scode <- make_stancode(bf(y ~ x, decomp = "QR"), dat, family = gaussian) expect_match2(scode, "normal_id_glm_lpdf(Y 
| XQ, Intercept, bQ, sigma);") }) test_that("customized covariances appear in the Stan code", { M <- diag(1, nrow = length(unique(inhaler$subject))) rownames(M) <- unique(inhaler$subject) dat2 <- list(M = M) scode <- make_stancode(rating ~ treat + (1 | gr(subject, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]))") scode <- make_stancode(rating ~ treat + (1 + treat | gr(subject, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1 = scale_r_cor_cov(z_1, sd_1, L_1, Lcov_1);") expect_match2(scode, "cor_1[choose(k - 1, 2) + j] = Cor_1[j, k];") scode <- make_stancode(rating ~ (1 + treat | gr(subject, cor = FALSE, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]));") expect_match2(scode, "r_1_2 = (sd_1[2] * (Lcov_1 * z_1[2]));") inhaler$by <- inhaler$subject %% 2 scode <- make_stancode(rating ~ (1 + treat | gr(subject, by = by, cov = M)), data = inhaler, data2 = dat2) expect_match2(scode, "r_1 = scale_r_cor_by_cov(z_1, sd_1, L_1, Jby_1, Lcov_1);") expect_warning( scode <- make_stancode(rating ~ treat + period + carry + (1|subject), data = inhaler, cov_ranef = list(subject = 1)), "Argument 'cov_ranef' is deprecated" ) expect_match2(scode, "r_1_1 = (sd_1[1] * (Lcov_1 * z_1[1]))") }) test_that("truncation appears in the Stan code", { scode <- make_stancode(time | trunc(0) ~ age + sex + disease, data = kidney, family = "gamma") expect_match2(scode, "target += gamma_lpdf(Y[n] | shape, mu[n]) -") expect_match2(scode, "gamma_lccdf(lb[n] | shape, mu[n]);") scode <- make_stancode(time | trunc(ub = 100) ~ age + sex + disease, data = kidney, family = student("log")) expect_match2(scode, "target += student_t_lpdf(Y[n] | nu, mu[n], sigma) -") expect_match2(scode, "student_t_lcdf(ub[n] | nu, mu[n], sigma);") scode <- make_stancode(count | trunc(0, 150) ~ Trt, data = epilepsy, family = "poisson") expect_match2(scode, "target += poisson_lpmf(Y[n] | mu[n]) -") expect_match2(scode, "log_diff_exp(poisson_lcdf(ub[n] | mu[n]), poisson_lcdf(lb[n] - 1 | mu[n]));" ) }) test_that("make_stancode handles models without fixed effects", { expect_match2(make_stancode(count ~ 0 + (1|patient) + (1+Trt|visit), data = epilepsy, family = "poisson"), "mu = rep_vector(0.0, N);") }) test_that("make_stancode correctly restricts FE parameters", { data <- data.frame(y = rep(0:1, each = 5), x = rnorm(10)) scode <- make_stancode(y ~ x, data, prior = set_prior("", lb = 2)) expect_match2(scode, "vector[Kc] b") scode <- make_stancode( y ~ x, data, prior = set_prior("normal (0, 2)", ub = "4") ) expect_match2(scode, "vector[Kc] b") expect_match2(scode, "- 1 * normal_lcdf(4 | 0, 2)") prior <- set_prior("normal(0,5)", lb = "-3", ub = 5) scode <- make_stancode(y ~ 0 + x, data, prior = prior) expect_match2(scode, "vector[K] b") }) test_that("self-defined functions appear in the Stan code", { # cauchit link scode <- make_stancode(rating ~ treat, data = inhaler, family = bernoulli("cauchit")) expect_match2(scode, "real inv_cauchit(real y)") # softplus link scode <- make_stancode(rating ~ treat, data = inhaler, family = brmsfamily("poisson", "softplus")) expect_match2(scode, "real log_expm1(real x)") # squareplus link scode <- make_stancode(rating ~ treat, data = inhaler, family = brmsfamily("poisson", "squareplus")) expect_match2(scode, "real squareplus(real x)") # tan_half link expect_match2(make_stancode(rating ~ treat, data = inhaler, family = von_mises("tan_half")), "real inv_tan_half(real y)") # logm1 link 
expect_match2(make_stancode(rating ~ treat, data = inhaler, family = frechet()), "real expp1(real y)") # inverse gaussian models scode <- make_stancode(time | cens(censored) ~ age, data = kidney, family = inverse.gaussian) expect_match2(scode, "real inv_gaussian_lpdf(real y") expect_match2(scode, "real inv_gaussian_lcdf(real y") expect_match2(scode, "real inv_gaussian_lccdf(real y") expect_match2(scode, "real inv_gaussian_vector_lpdf(vector y") # von Mises models scode <- make_stancode(time ~ age, data = kidney, family = von_mises) expect_match2(scode, "real von_mises_real_lpdf(real y") expect_match2(scode, "real von_mises_vector_lpdf(vector y") # zero-inflated and hurdle models expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_poisson"), "real zero_inflated_poisson_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_negbinomial"), "real zero_inflated_neg_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_binomial"), "real zero_inflated_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_inflated_beta"), "real zero_inflated_beta_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = "zero_one_inflated_beta"), "real zero_one_inflated_beta_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_poisson()), "real hurdle_poisson_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_negbinomial), "real hurdle_neg_binomial_lpmf(int y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_gamma("log")), "real hurdle_gamma_lpdf(real y") expect_match2(make_stancode(count ~ Trt, data = epilepsy, family = hurdle_lognormal("identity")), "real hurdle_lognormal_lpdf(real y") # linear models with special covariance structures expect_match2( make_stancode(rating ~ treat + ar(cov = TRUE), data = inhaler), "real normal_time_hom_lpdf(vector y" ) expect_match2( make_stancode(time ~ age + ar(cov = TRUE), data = kidney, family = "student"), "real student_t_time_hom_lpdf(vector y" ) # ARMA covariance matrices expect_match2( make_stancode(rating ~ treat + ar(cov = TRUE), data = inhaler), "matrix cholesky_cor_ar1(real ar" ) expect_match2( make_stancode(time ~ age + ma(cov = TRUE), data = kidney), "matrix cholesky_cor_ma1(real ma" ) expect_match2( make_stancode(time ~ age + arma(cov = TRUE), data = kidney), "matrix cholesky_cor_arma1(real ar, real ma" ) }) test_that("invalid combinations of modeling options are detected", { data <- data.frame(y1 = rnorm(10), y2 = rnorm(10), wi = 1:10, ci = sample(-1:1, 10, TRUE)) expect_error( make_stancode(y1 | cens(ci) ~ y2 + ar(cov = TRUE), data = data), "Invalid addition arguments for this model" ) form <- bf(mvbind(y1, y2) ~ 1 + ar(cov = TRUE)) + set_rescor(TRUE) expect_error( make_stancode(form, data = data), "Explicit covariance terms cannot be modeled when 'rescor'" ) expect_error( make_stancode(y1 | resp_se(wi) ~ y2 + ma(), data = data), "Please set cov = TRUE in ARMA structures" ) }) test_that("Stan code for multivariate models is correct", { dat <- data.frame( y1 = rnorm(10), y2 = rnorm(10), x = 1:10, g = rep(1:2, each = 5), censi = sample(0:1, 10, TRUE) ) # models with residual correlations form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) prior <- prior(horseshoe(2), resp = "y1") + prior(horseshoe(2), resp = "y2") scode <- make_stancode(form, dat, prior = prior) expect_match2(scode, "target 
+= multi_normal_cholesky_lpdf(Y | Mu, LSigma);") expect_match2(scode, "LSigma = diag_pre_multiply(sigma, Lrescor);") expect_match2(scode, "target += student_t_lpdf(hs_local_y1 | hs_df_y1, 0, 1)") expect_match2(scode, "target += student_t_lpdf(hs_local_y2 | hs_df_y2, 0, 1)") expect_match2(scode, "rescor[choose(k - 1, 2) + j] = Rescor[j, k];") form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) prior <- prior(lasso(2, 10), resp = "y1") + prior(lasso(2, 10), resp = "y2") scode <- make_stancode(form, dat, student(), prior = prior) expect_match2(scode, "target += multi_student_t_lpdf(Y | nu, Mu, Sigma);") expect_match2(scode, "matrix[nresp, nresp] Sigma = multiply_lower") expect_match2(scode, "target += gamma_lpdf(nu | 2, 0.1)") expect_match2(scode, "target += chi_square_lpdf(lasso_inv_lambda_y1 | lasso_df_y1)" ) expect_match2(scode, "target += chi_square_lpdf(lasso_inv_lambda_y2 | lasso_df_y2)" ) form <- bf(mvbind(y1, y2) | weights(x) ~ 1) + set_rescor(TRUE) scode <- make_stancode(form, dat) expect_match2(scode, "target += weights[n] * (multi_normal_cholesky_lpdf(Y[n] | Mu[n], LSigma));" ) # models without residual correlations expect_warning( bform <- bf(y1 | cens(censi) ~ x + y2 + (x|2|g)) + gaussian() + cor_ar() + (bf(x ~ 1) + mixture(poisson, nmix = 2)) + (bf(y2 ~ s(y2) + (1|2|g)) + skew_normal()), "Using 'cor_brms' objects for 'autocor' is deprecated" ) bprior <- prior(normal(0, 5), resp = y1) + prior(normal(0, 10), resp = y2) scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "r_1_y2_3 = r_1[, 3]") expect_match2(scode, "err_y1[n] = Y_y1[n] - mu_y1[n]") expect_match2(scode, "target += normal_lccdf(Y_y1[n] | mu_y1[n], sigma_y1)") expect_match2(scode, "target += skew_normal_lpdf(Y_y2 | mu_y2, omega_y2, alpha_y2)") expect_match2(scode, "ps[1] = log(theta1_x) + poisson_log_lpmf(Y_x[n] | mu1_x[n])") expect_match2(scode, "target += normal_lpdf(b_y1 | 0, 5)") expect_match2(scode, "target += normal_lpdf(bs_y2 | 0, 10)") # multivariate binomial models bform <- bf(x ~ 1) + bf(g ~ 1) + binomial() scode <- make_stancode(bform, dat) expect_match2(scode, "binomial_logit_lpmf(Y_x | trials_x, mu_x)") expect_match2(scode, "binomial_logit_lpmf(Y_g | trials_g, mu_g)") bform <- bform + weibull() scode <- make_stancode(bform, dat) expect_match2(scode, "mu_g[n] = exp(mu_g[n]) / tgamma(1 + 1 / shape_g)") }) test_that("Stan code for categorical models is correct", { dat <- data.frame(y = rep(c(1, 2, 3, "a_b"), 2), x = 1:8, .g = 1:8) prior <- prior(normal(0, 5), "b", dpar = muab) + prior(normal(0, 10), "b", dpar = mu2) + prior(cauchy(0, 1), "Intercept", dpar = mu2) + prior(normal(0, 2), "Intercept", dpar = mu3) scode <- make_stancode(y ~ x + (1 | gr(.g, id = "ID")), data = dat, family = categorical(), prior = prior) expect_match2(scode, "target += categorical_logit_lpmf(Y[n] | mu[n]);") expect_match2(scode, "mu[n] = transpose([0, mu2[n], mu3[n], muab[n]]);") expect_match2(scode, "mu2 = Intercept_mu2 + Xc_mu2 * b_mu2;") expect_match2(scode, "muab[n] += r_1_muab_3[J_1[n]] * Z_1_muab_3[n];") expect_match2(scode, "target += normal_lpdf(b_mu2 | 0, 10);") expect_match2(scode, "target += normal_lpdf(b_muab | 0, 5);") expect_match2(scode, "target += cauchy_lpdf(Intercept_mu2 | 0, 1);") expect_match2(scode, "target += normal_lpdf(Intercept_mu3 | 0, 2);") expect_match2(scode, "r_1 = scale_r_cor(z_1, sd_1, L_1);") scode <- make_stancode(y ~ x + (1 |ID| .g), data = dat, family = categorical(refcat = NA)) expect_match2(scode, "mu[n] = transpose([mu1[n], mu2[n], mu3[n], muab[n]]);") }) test_that("Stan code 
for multinomial models is correct", { N <- 15 dat <- data.frame( y1 = rbinom(N, 10, 0.3), y2 = rbinom(N, 10, 0.5), y3 = rbinom(N, 10, 0.7), x = rnorm(N) ) dat$size <- with(dat, y1 + y2 + y3) dat$y <- with(dat, cbind(y1, y2, y3)) prior <- prior(normal(0, 10), "b", dpar = muy2) + prior(cauchy(0, 1), "Intercept", dpar = muy2) + prior(normal(0, 2), "Intercept", dpar = muy3) scode <- make_stancode(bf(y | trials(size) ~ 1, muy2 ~ x), data = dat, family = multinomial(), prior = prior) expect_match2(scode, "int Y[N, ncat];") expect_match2(scode, "target += multinomial_logit2_lpmf(Y[n] | mu[n]);") expect_match2(scode, "muy2 = Intercept_muy2 + Xc_muy2 * b_muy2;") expect_match2(scode, "target += normal_lpdf(b_muy2 | 0, 10);") expect_match2(scode, "target += cauchy_lpdf(Intercept_muy2 | 0, 1);") expect_match2(scode, "target += normal_lpdf(Intercept_muy3 | 0, 2);") }) test_that("Stan code for dirichlet models is correct", { N <- 15 dat <- as.data.frame(rdirichlet(N, c(3, 2, 1))) names(dat) <- c("y1", "y2", "y3") dat$x <- rnorm(N) dat$y <- with(dat, cbind(y1, y2, y3)) # dirichlet in probability-sum(alpha) concentration prior <- prior(normal(0, 5), class = "b", dpar = "muy3") + prior(exponential(10), "phi") scode <- make_stancode(bf(y ~ 1, muy3 ~ x), data = dat, family = dirichlet(), prior = prior) expect_match2(scode, "vector[ncat] Y[N];") expect_match2(scode, "target += dirichlet_logit_lpdf(Y[n] | mu[n], phi);") expect_match2(scode, "muy3 = Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "target += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "target += exponential_lpdf(phi | 10);") scode <- make_stancode(bf(y ~ x, phi ~ x), data = dat, family = dirichlet()) expect_match2(scode, "target += dirichlet_logit_lpdf(Y[n] | mu[n], phi[n]);") expect_match2(scode, "vector[N] phi = Intercept_phi + Xc_phi * b_phi;") expect_match2(scode, "phi[n] = exp(phi[n]);") # dirichlet2 in alpha parameterization prior <- prior(normal(0, 5), class = "b", dpar = "muy3") scode <- make_stancode(bf(y ~ 1, muy3 ~ x), data = dat, family = brmsfamily("dirichlet2"), prior = prior) expect_match2(scode, "vector[ncat] Y[N];") expect_match2(scode, "muy3[n] = exp(muy3[n]);") expect_match2(scode, "target += dirichlet_lpdf(Y[n] | mu[n]);") expect_match2(scode, "muy3 = Intercept_muy3 + Xc_muy3 * b_muy3;") expect_match2(scode, "mu[n] = transpose([muy1[n], muy2[n], muy3[n]]);") expect_match2(scode, "target += normal_lpdf(b_muy3 | 0, 5);") expect_match2(scode, "target += student_t_lpdf(Intercept_muy1 | 3, 0, 2.5);") }) test_that("Stan code for ARMA models is correct", { dat <- data.frame(y = rep(1:4, 2), x = 1:8, time = 1:8) scode <- make_stancode(y ~ x + ar(time), dat, student()) expect_match2(scode, "err[n] = Y[n] - mu[n];") expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") scode <- make_stancode(y ~ x + ma(time, q = 2), dat, student()) expect_match2(scode, "mu[n] += Err[n, 1:Kma] * ma;") expect_warning( scode <- make_stancode(mvbind(y, x) ~ 1, dat, gaussian(), autocor = cor_ar()), "Argument 'autocor' should be specified within the 'formula' argument" ) expect_match2(scode, "err_y[n] = Y_y[n] - mu_y[n];") bform <- bf(y ~ x, sigma ~ x) + acformula(~arma(time, cov = TRUE)) scode <- make_stancode(bform, dat, family = student) expect_match2(scode, "student_t_time_het_lpdf(Y | nu, mu, sigma, chol_cor") bform <- bf(y ~ exp(eta) - 1, eta ~ x, autocor = ~ar(time), nl = TRUE) scode <- make_stancode(bform, dat, family = student, prior = prior(normal(0, 1), nlpar = eta)) expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") # 
correlations of latent residuals scode <- make_stancode( y ~ x + ar(time, cov = TRUE), dat, family = poisson, prior = prior(cauchy(0, 10), class = sderr) ) expect_match2(scode, "chol_cor = cholesky_cor_ar1(ar[1], max_nobs_tg);") expect_match2(scode, "err = scale_time_err(zerr, sderr, chol_cor, nobs_tg, begin_tg, end_tg);" ) expect_match2(scode, "vector[N] mu = Intercept + Xc * b + err;") expect_match2(scode, "target += cauchy_lpdf(sderr | 0, 10);") scode <- make_stancode( y ~ x + ar(time), dat, family = poisson, prior = prior(cauchy(0, 10), class = sderr) ) expect_match2(scode, "mu[n] += Err[n, 1:Kar] * ar;") expect_match2(scode, "err = sderr * zerr;") expect_match2(scode, "vector[N] mu = Intercept + Xc * b + err;") expect_match2(scode, "target += cauchy_lpdf(sderr | 0, 10);") }) test_that("Stan code for compound symmetry models is correct", { dat <- data.frame(y = rep(1:4, 2), x = 1:8, time = 1:8) scode <- make_stancode( y ~ x + cosy(time), dat, prior = prior(normal(0, 2), cosy) ) expect_match2(scode, "real cosy;") expect_match2(scode, "chol_cor = cholesky_cor_cosy(cosy, max_nobs_tg);") expect_match2(scode, "target += normal_lpdf(cosy | 0, 2);") scode <- make_stancode(bf(y ~ x + cosy(time), sigma ~ x), dat) expect_match2(scode, "normal_time_het_lpdf(Y | mu, sigma, chol_cor") scode <- make_stancode(y ~ x + cosy(time), dat, family = poisson) expect_match2(scode, "chol_cor = cholesky_cor_cosy(cosy, max_nobs_tg);") }) test_that("Stan code for intercept only models is correct", { expect_match2(make_stancode(rating ~ 1, data = inhaler), "b_Intercept = Intercept;") expect_match2(make_stancode(rating ~ 1, data = inhaler, family = cratio()), "b_Intercept = Intercept;") expect_match2(make_stancode(rating ~ 1, data = inhaler, family = categorical()), "b_mu3_Intercept = Intercept_mu3;") }) test_that("Stan code of ordinal models is correct", { dat <- data.frame(y = c(rep(1:4, 2), 1, 1), x1 = rnorm(10), x2 = rnorm(10), g = factor(rep(1:2, 5))) scode <- make_stancode( y ~ x1, dat, family = cumulative(), prior = prior(normal(0, 2), Intercept, coef = 2) ) expect_match2(scode, "target += ordered_logistic_lpmf(Y[n] | mu[n], Intercept);" ) expect_match2(scode, "target += student_t_lpdf(Intercept[1] | 3, 0, 2.5);") expect_match2(scode, "target += normal_lpdf(Intercept[2] | 0, 2);") scode <- make_stancode( y ~ x1, dat, cumulative("probit", threshold = "equidistant"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "real cumulative_probit_lpmf(int y") expect_match2(scode, "p = Phi(disc * (thres[1] - mu));") expect_match2(scode, "real delta;") expect_match2(scode, "Intercept[k] = first_Intercept + (k - 1.0) * delta;") expect_match2(scode, "b_Intercept = Intercept + dot_product(means_X, b);") expect_match2(scode, "target += normal_lpdf(first_Intercept | 0, 2);") scode <- make_stancode(y ~ x1, dat, family = cratio("probit")) expect_match2(scode, "real cratio_probit_lpmf(int y") expect_match2(scode, "q[k] = normal_lcdf(disc * (mu - thres[k])|0,1);") scode <- make_stancode(y ~ x1 + cs(x2) + cs(g), dat, family = sratio()) expect_match2(scode, "real sratio_logit_lpmf(int y") expect_match2(scode, "matrix[N, Kcs] Xcs;") expect_match2(scode, "matrix[Kcs, nthres] bcs;") expect_match2(scode, "mucs = Xcs * bcs;") expect_match2(scode, "target += sratio_logit_lpmf(Y[n] | mu[n], disc, Intercept - transpose(mucs[n]));" ) scode <- make_stancode(y ~ x1 + cse(x2) + (cse(1)|g), dat, family = acat()) expect_match2(scode, "real acat_logit_lpmf(int y") expect_match2(scode, "mucs[n, 1] = mucs[n, 1] + r_1_1[J_1[n]] * 
Z_1_1[n];") expect_match2(scode, "b_Intercept = Intercept + dot_product(means_X, b);") scode <- make_stancode(y ~ x1 + (cse(x2)||g), dat, family = acat("probit_approx")) expect_match2(scode, paste("mucs[n, 3] = mucs[n, 3] + r_1_3[J_1[n]] * Z_1_3[n]", "+ r_1_6[J_1[n]] * Z_1_6[n];")) expect_match2(scode, "target += acat_probit_approx_lpmf(Y[n] | mu[n], disc, Intercept - transpose(mucs[n]));" ) # sum-to-zero thresholds scode <- make_stancode( y ~ x1, dat, cumulative("probit", threshold = "sum_to_zero"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "Intercept_stz = Intercept - mean(Intercept);") expect_match2(scode, "cumulative_probit_lpmf(Y[n] | mu[n], disc, Intercept_stz);") expect_match2(scode, "vector[nthres] b_Intercept = Intercept_stz;") # non-linear ordinal models scode <- make_stancode( bf(y ~ eta, eta ~ x1, nl = TRUE), dat, family = cumulative(), prior = prior(normal(0, 2), nlpar = eta) ) expect_match2(scode, "ordered[nthres] Intercept;") expect_match2(scode, "target += ordered_logistic_lpmf(Y[n] | mu[n], Intercept);" ) # ordinal mixture models with fixed intercepts scode <- make_stancode( bf(y ~ 1, mu1 ~ x1, mu2 ~ 1), data = dat, family = mixture(cumulative(), nmix = 2, order = "mu") ) expect_match2(scode, "Intercept_mu2 = fixed_Intercept;") expect_match2(scode, "target += student_t_lpdf(fixed_Intercept | 3, 0, 2.5);") }) test_that("ordinal disc parameters appear in the Stan code", { scode <- make_stancode( bf(rating ~ period + carry + treat, disc ~ period), data = inhaler, family = cumulative(), prior = prior(normal(0,5), dpar = disc) ) expect_match2(scode, "target += cumulative_logit_lpmf(Y[n] | mu[n], disc[n], Intercept)" ) expect_match2(scode, "target += normal_lpdf(b_disc | 0, 5)") expect_match2(scode, "disc[n] = exp(disc[n])") }) test_that("grouped ordinal thresholds appear in the Stan code", { dat <- data.frame( y = sample(1:6, 10, TRUE), y2 = sample(1:6, 10, TRUE), gr = rep(c("a", "b"), each = 5), th = rep(5:6, each = 5), x = rnorm(10) ) prior <- prior(normal(0,1), class = "Intercept", group = "b") scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = sratio(), prior = prior ) expect_match2(scode, "int nthres[ngrthres];") expect_match2(scode, "merged_Intercept[Kthres_start[1]:Kthres_end[1]] = Intercept_1;") expect_match2(scode, "target += sratio_logit_merged_lpmf(Y[n]") expect_match2(scode, "target += normal_lpdf(Intercept_2 | 0, 1);") # centering needs to be deactivated automatically expect_match2(scode, "vector[nthres[1]] b_Intercept_1 = Intercept_1;") # model with equidistant thresholds scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = cumulative(threshold = "equidistant"), prior = prior ) expect_match2(scode, "target += ordered_logistic_merged_lpmf(Y[n]") expect_match2(scode, "real first_Intercept_1;") expect_match2(scode, "target += normal_lpdf(first_Intercept_2 | 0, 1);") expect_match2(scode, "Intercept_2[k] = first_Intercept_2 + (k - 1.0) * delta_2;") # sum-to-zero constraints scode <- make_stancode( y | thres(gr = gr) ~ x, data = dat, cumulative(threshold = "sum_to_zero"), prior = prior(normal(0, 2), Intercept) ) expect_match2(scode, "merged_Intercept_stz[Kthres_start[2]:Kthres_end[2]] = Intercept_stz_2;") expect_match2(scode, "ordered_logistic_merged_lpmf(Y[n] | mu[n], merged_Intercept_stz, Jthres[n]);") # ordinal mixture model scode <- make_stancode( y | thres(th, gr) ~ x, data = dat, family = mixture(cratio, acat, order = "mu"), prior = prior ) expect_match2(scode, "ps[1] = log(theta1) + 
cratio_logit_merged_lpmf(Y[n]") expect_match2(scode, "ps[2] = log(theta2) + acat_logit_merged_lpmf(Y[n]") expect_match2(scode, "vector[nmthres] merged_Intercept_mu1;") expect_match2(scode, "merged_Intercept_mu2[Kthres_start[1]:Kthres_end[1]] = Intercept_mu2_1;") expect_match2(scode, "vector[nthres[1]] b_mu1_Intercept_1 = Intercept_mu1_1;") # multivariate ordinal model bform <- bf(y | thres(th, gr) ~ x, family = sratio) + bf(y2 | thres(th, gr) ~ x, family = cumulative) scode <- make_stancode(bform, data = dat) expect_match2(scode, "target += student_t_lpdf(Intercept_y2_1 | 3, 0, 2.5);") expect_match2(scode, "merged_Intercept_y[Kthres_start_y[2]:Kthres_end_y[2]] = Intercept_y_2;") }) test_that("monotonic effects appear in the Stan code", { dat <- data.frame(y = rpois(120, 10), x1 = rep(1:4, 30), x2 = factor(rep(c("a", "b", "c"), 40), ordered = TRUE), g = rep(1:10, each = 12)) prior <- c(prior(normal(0,1), class = b, coef = mox1), prior(dirichlet(c(1,0.5,2)), simo, coef = mox11), prior(dirichlet(c(1,0.5,2)), simo, coef = mox21)) scode <- make_stancode(y ~ y*mo(x1)*mo(x2), dat, prior = prior) expect_match2(scode, "int Xmo_3[N];") expect_match2(scode, "simplex[Jmo[1]] simo_1;") expect_match2(scode, "(bsp[2]) * mo(simo_2, Xmo_2[n])") expect_match2(scode, "(bsp[6]) * mo(simo_7, Xmo_7[n]) * mo(simo_8, Xmo_8[n]) * Csp_3[n]" ) expect_match2(scode, "target += normal_lpdf(bsp[1] | 0, 1)") expect_match2(scode, "target += dirichlet_lpdf(simo_1 | con_simo_1);") expect_match2(scode, "target += dirichlet_lpdf(simo_8 | con_simo_8);") scode <- make_stancode(y ~ mo(x1) + (mo(x1) | x2), dat) expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]]) * mo(simo_1, Xmo_1[n])") expect_true(!grepl("Z_1_w", scode)) # test issue reported in discourse post #12978 scode <- make_stancode(y ~ mo(x1) + (mo(x1) | x2) + (mo(x1) | g), dat) expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]] + r_2_2[J_2[n]]) * mo(simo_1, Xmo_1[n])") # test issue #813 scode <- make_stancode(y ~ mo(x1):y, dat) expect_match2(scode, "mu[n] += (bsp[1]) * mo(simo_1, Xmo_1[n]) * Csp_1[n];") # test issue #924 (conditional monotonicity) prior <- c(prior(dirichlet(c(1,0.5,2)), simo, coef = "v"), prior(dirichlet(c(1,0.5,2)), simo, coef = "w")) scode <- make_stancode(y ~ y*mo(x1, id = "v")*mo(x2, id = "w"), dat, prior = prior) expect_match2(scode, "target += dirichlet_lpdf(simo_1 | con_simo_1);") expect_match2(scode, "target += dirichlet_lpdf(simo_2 | con_simo_2);") expect_match2(scode, "simplex[Jmo[6]] simo_6 = simo_2;") expect_match2(scode, "simplex[Jmo[7]] simo_7 = simo_1;") expect_error( make_stancode(y ~ mo(x1) + (mo(x2) | x2), dat), "Special group-level terms require" ) prior <- prior(beta(1, 1), simo, coef = mox11) expect_error( make_stancode(y ~ mo(x1), dat, prior = prior), "'dirichlet' is the only valid prior for simplex parameters" ) }) test_that("Stan code for non-linear models is correct", { flist <- list(a ~ x, b ~ z + (1|g)) data <- data.frame( y = rgamma(9, 1, 1), x = rnorm(9), z = rnorm(9), v = 1L:9L, g = rep(1:3, 3) ) prior <- c(set_prior("normal(0,5)", nlpar = "a"), set_prior("normal(0,1)", nlpar = "b")) # syntactic validity is already checked within make_stancode scode <- make_stancode( bf(y ~ a - exp(b^z) * (z <= a) * v, flist = flist, nl = TRUE), data = data, prior = prior ) expect_match2(scode, "mu[n] = nlp_a[n] - exp(nlp_b[n] ^ C_1[n]) * (C_1[n] <= nlp_a[n]) * C_2[n];" ) expect_match2(scode, "vector[N] C_1;") expect_match2(scode, "int C_2[N];") # non-linear predictor can be computed outside a loop scode <- make_stancode(bf(y ~ a - exp(b + z), flist 
= flist, nl = TRUE, loop = FALSE), data = data, prior = prior) expect_match2(scode, "mu = nlp_a - exp(nlp_b + C_1);") # check if that only works with threading scode <- make_stancode(bf(y ~ a - exp(b + z), flist = flist, nl = TRUE, loop = FALSE), data = data, prior = prior, threads = threading(2), parse = FALSE) expect_match2(scode, "mu = nlp_a - exp(nlp_b + C_1[start:end]);") flist <- list(a1 ~ 1, a2 ~ z + (x|g)) prior <- c(set_prior("beta(1,1)", nlpar = "a1", lb = 0, ub = 1), set_prior("normal(0,1)", nlpar = "a2")) scode <- make_stancode( bf(y ~ a1 * exp(-x/(a2 + z)), flist = flist, nl = TRUE), data = data, family = Gamma("log"), prior = prior ) expect_match2(scode, paste("mu[n] = shape * exp(-(nlp_a1[n] *", "exp( - C_1[n] / (nlp_a2[n] + C_2[n]))));")) bform <- bf(y ~ x) + nlf(sigma ~ a1 * exp(-x/(a2 + z))) + lf(a1 ~ 1, a2 ~ z + (x|g)) + lf(alpha ~ x) scode <- make_stancode( bform, data, family = skew_normal(), prior = c( prior(normal(0, 1), nlpar = a1), prior(normal(0, 5), nlpar = a2) ) ) expect_match2(scode, "nlp_a1 = X_a1 * b_a1") expect_match2(scode, "sigma[n] = exp(nlp_a1[n] * exp( - C_sigma_1[n] / (nlp_a2[n] + C_sigma_2[n])))" ) expect_match2(scode, "target += normal_lpdf(b_a2 | 0, 5)") expect_error(make_stancode(bform, data, family = skew_normal()), "Priors on population-level coefficients are required") }) test_that("Stan code for nested non-linear parameters is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = 1:5) bform <- bf( y ~ lb + (1 - lb) * inv_logit(b * x), b + a ~ 1 + (1 | z), nlf(lb ~ inv_logit(a / x)), nl = TRUE ) bprior <- prior(normal(0, 1), nlpar = "a") + prior(normal(0, 1), nlpar = "b") scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "nlp_lb[n] = inv_logit(nlp_a[n] / C_lb_1[n]);") expect_match2(scode, "mu[n] = nlp_lb[n] + (1 - nlp_lb[n]) * inv_logit(nlp_b[n] * C_1[n]);" ) }) test_that("make_stancode accepts very long non-linear formulas", { data <- data.frame(y = rnorm(10), this_is_a_very_long_predictor = rnorm(10)) expect_silent(make_stancode(bf(y ~ b0 + this_is_a_very_long_predictor + this_is_a_very_long_predictor + this_is_a_very_long_predictor, b0 ~ 1, nl = TRUE), data = data, prior = prior(normal(0,1), nlpar = "b0"))) }) test_that("no loop in trans-par is defined for simple 'identity' models", { expect_true(!grepl(make_stancode(time ~ age, data = kidney), "mu[n] = (mu[n]);", fixed = TRUE)) expect_true(!grepl(make_stancode(time ~ age, data = kidney, family = poisson("identity")), "mu[n] = (mu[n]);", fixed = TRUE)) }) test_that("known standard errors appear in the Stan code", { scode <- make_stancode(time | se(age) ~ sex, data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, se)") scode <- make_stancode(time | se(age) + weights(age) ~ sex, data = kidney) expect_match2(scode, "target += weights[n] * (normal_lpdf(Y[n] | mu[n], se[n]))") scode <- make_stancode(time | se(age, sigma = TRUE) ~ sex, data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, sqrt(square(sigma) + se2))") scode <- make_stancode(bf(time | se(age, sigma = TRUE) ~ sex, sigma ~ sex), data = kidney) expect_match2(scode, "target += normal_lpdf(Y | mu, sqrt(square(sigma) + se2))") }) test_that("functions defined in 'stan_funs' appear in the functions block", { test_fun <- paste0(" real test_fun(real a, real b) {\n", " return a + b;\n", " }\n") scode <- SW(make_stancode(time ~ age, data = kidney, stan_funs = test_fun)) expect_match2(scode, test_fun) }) test_that("FCOR matrices appear in the Stan code", { data <- data.frame(y = 1:5) V <- 
diag(5) expect_match2(make_stancode(y ~ fcor(V), data = data, family = gaussian(), data2 = list(V = V)), "target += normal_fcor_hom_lpdf(Y | mu, sigma, Lfcor);") expect_match2(make_stancode(y ~ fcor(V), data = data, family = student(), data2 = list(V = V)), "target += student_t_fcor_hom_lpdf(Y | nu, mu, sigma, Lfcor);") }) test_that("Stan code for GAMMs is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10), g = factor(rep(1:2, 5))) scode <- make_stancode(y ~ s(x) + (1|g), data = dat, prior = set_prior("normal(0,2)", "sds")) expect_match2(scode, "Zs_1_1 * s_1_1") expect_match2(scode, "matrix[N, knots_1[1]] Zs_1_1") expect_match2(scode, "target += std_normal_lpdf(zs_1_1)") expect_match2(scode, "target += normal_lpdf(sds_1_1 | 0,2)") prior <- c(set_prior("normal(0,5)", nlpar = "lp"), set_prior("normal(0,2)", "sds", nlpar = "lp")) scode <- make_stancode(bf(y ~ lp, lp ~ s(x) + (1|g), nl = TRUE), data = dat, prior = prior) expect_match2(scode, "Zs_lp_1_1 * s_lp_1_1") expect_match2(scode, "matrix[N, knots_lp_1[1]] Zs_lp_1_1") expect_match2(scode, "target += std_normal_lpdf(zs_lp_1_1)") expect_match2(scode, "target += normal_lpdf(sds_lp_1_1 | 0,2)") scode <- make_stancode( y ~ s(x) + t2(x,y), data = dat, prior = set_prior("normal(0,1)", "sds") + set_prior("normal(0,2)", "sds", coef = "t2(x, y)") ) expect_match2(scode, "Zs_2_2 * s_2_2") expect_match2(scode, "matrix[N, knots_2[2]] Zs_2_2") expect_match2(scode, "target += std_normal_lpdf(zs_2_2)") expect_match2(scode, "target += normal_lpdf(sds_1_1 | 0,1)") expect_match2(scode, "target += normal_lpdf(sds_2_2 | 0,2)") scode <- make_stancode(y ~ g + s(x, by = g), data = dat) expect_match2(scode, "vector[knots_2[1]] zs_2_1") expect_match2(scode, "s_2_1 = sds_2_1 * zs_2_1") }) test_that("Stan code of response times models is correct", { dat <- epilepsy dat$cens <- sample(-1:1, nrow(dat), TRUE) scode <- make_stancode(count ~ Trt + (1|patient), data = dat, family = exgaussian("log"), prior = prior(gamma(1,1), class = beta)) expect_match2(scode, "target += exp_mod_normal_lpdf(Y | mu - beta, sigma, inv(beta))" ) expect_match2(scode, "mu[n] = exp(mu[n])") expect_match2(scode, "target += gamma_lpdf(beta | 1, 1)") scode <- make_stancode(bf(count ~ Trt + (1|patient), sigma ~ Trt, beta ~ Trt), data = dat, family = exgaussian()) expect_match2(scode, "target += exp_mod_normal_lpdf(Y | mu - beta, sigma, inv(beta))" ) expect_match2(scode, "beta[n] = exp(beta[n])") scode <- make_stancode(count | cens(cens) ~ Trt + (1|patient), data = dat, family = exgaussian("inverse")) expect_match2(scode, "exp_mod_normal_lccdf(Y[n] | mu[n] - beta, sigma, inv(beta))") scode <- make_stancode(count ~ Trt, dat, family = shifted_lognormal()) expect_match2(scode, "target += lognormal_lpdf(Y - ndt | mu, sigma)") scode <- make_stancode(count | cens(cens) ~ Trt, dat, family = shifted_lognormal()) expect_match2(scode, "target += lognormal_lcdf(Y[n] - ndt | mu[n], sigma)") # test issue #837 scode <- make_stancode(mvbind(count, zBase) ~ Trt, data = dat, family = shifted_lognormal()) expect_match2(scode, "target += uniform_lpdf(ndt_count | 0, min_Y_count)") expect_match2(scode, "target += uniform_lpdf(ndt_zBase | 0, min_Y_zBase)") }) test_that("Stan code of wiener diffusion models is correct", { dat <- data.frame(q = 1:10, resp = sample(0:1, 10, TRUE), x = rnorm(10)) scode <- make_stancode(q | dec(resp) ~ x, data = dat, family = wiener()) expect_match2(scode, "target += wiener_diffusion_lpdf(Y[n] | dec[n], bs, ndt, bias, mu[n])" ) scode <- make_stancode(bf(q | dec(resp) ~ x, bs ~ x, ndt 
~ x, bias ~ x), data = dat, family = wiener()) expect_match2(scode, "target += wiener_diffusion_lpdf(Y[n] | dec[n], bs[n], ndt[n], bias[n], mu[n])" ) expect_match2(scode, "bias[n] = inv_logit(bias[n]);") scode <- make_stancode(bf(q | dec(resp) ~ x, ndt = 0.5), data = dat, family = wiener()) expect_match2(scode, "real ndt = 0.5;") expect_error(make_stancode(q ~ x, data = dat, family = wiener()), "Addition argument 'dec' is required for family 'wiener'") }) test_that("Group IDs appear in the Stan code", { form <- bf(count ~ Trt + (1+Trt|3|visit) + (1|patient), shape ~ (1|3|visit) + (Trt||patient)) scode <- make_stancode(form, data = epilepsy, family = negbinomial()) expect_match2(scode, "r_2_1 = r_2[, 1]") expect_match2(scode, "r_2_shape_3 = r_2[, 3]") form <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt||patient), a ~ Trt + (1+Trt|3|visit) + (1|patient), nl = TRUE) scode <- make_stancode(form, data = epilepsy, family = student(), prior = set_prior("normal(0,5)", nlpar = "a")) expect_match2(scode, "r_2_a_2 = r_2[, 2];") expect_match2(scode, "r_1_sigma_2 = (sd_1[2] * (z_1[2]));") }) test_that("distributional gamma models are handled correctly", { # test fix of issue #124 scode <- make_stancode( bf(time ~ age * sex + disease + (1|patient), shape ~ age + (1|patient)), data = kidney, family = Gamma("log") ) expect_match(scode, paste0( brms:::escape_all("shape[n] = exp(shape[n]);"), ".+", brms:::escape_all("mu[n] = shape[n] * exp(-(mu[n]));") )) scode <- make_stancode( bf(time ~ inv_logit(a) * exp(b * age), a + b ~ sex + (1|patient), nl = TRUE, shape ~ age + (1|patient)), data = kidney, family = Gamma("identity"), prior = c(set_prior("normal(2,2)", nlpar = "a"), set_prior("normal(0,3)", nlpar = "b")) ) expect_match(scode, paste0( brms:::escape_all("shape[n] = exp(shape[n]);"), ".+", brms:::escape_all("mu[n] = shape[n] / (inv_logit(nlp_a[n]) * exp(nlp_b[n] * C_1[n]));") )) }) test_that("weighted, censored, and truncated likelihoods are correct", { dat <- data.frame(y = 1:9, x = rep(-1:1, 3), y2 = 10:18) scode <- make_stancode(y | weights(y2) ~ 1, dat, poisson()) expect_match2(scode, "target += weights[n] * (poisson_log_lpmf(Y[n] | mu[n]));") scode <- make_stancode(y | trials(y2) + weights(y2) ~ 1, dat, binomial()) expect_match2(scode, "target += weights[n] * (binomial_logit_lpmf(Y[n] | trials[n], mu[n]));" ) scode <- make_stancode(y | cens(x, y2) ~ 1, dat, poisson()) expect_match2(scode, "target += poisson_lpmf(Y[n] | mu[n]);") scode <- make_stancode(y | cens(x) ~ 1, dat, weibull()) expect_match2(scode, "target += weibull_lccdf(Y[n] | shape, mu[n]);") dat$x[1] <- 2 scode <- make_stancode(y | cens(x, y2) ~ 1, dat, gaussian()) expect_match2(scode, paste0( "target += log_diff_exp(\n", " normal_lcdf(rcens[n] | mu[n], sigma)," )) dat$x <- 1 expect_match2(make_stancode(y | cens(x) + weights(x) ~ 1, dat, weibull()), "target += weights[n] * weibull_lccdf(Y[n] | shape, mu[n]);") scode <- make_stancode(y | cens(x) + trunc(0.1) ~ 1, dat, weibull()) expect_match2(scode, "target += weibull_lccdf(Y[n] | shape, mu[n]) -") expect_match2(scode, " weibull_lccdf(lb[n] | shape, mu[n]);") scode <- make_stancode(y | cens(x) + trunc(ub = 30) ~ 1, dat) expect_match2(scode, "target += normal_lccdf(Y[n] | mu[n], sigma) -") expect_match2(scode, " normal_lcdf(ub[n] | mu[n], sigma);") scode <- make_stancode(y | weights(x) + trunc(0, 30) ~ 1, dat) expect_match2(scode, "target += weights[n] * (normal_lpdf(Y[n] | mu[n], sigma) -") expect_match2(scode, " log_diff_exp(normal_lcdf(ub[n] | mu[n], sigma),") }) test_that("noise-free terms 
appear in the Stan code", { N <- 30 dat <- data.frame( y = rnorm(N), x = rnorm(N), z = rnorm(N), xsd = abs(rnorm(N, 1)), zsd = abs(rnorm(N, 1)), ID = rep(1:5, each = N / 5) ) me_prior <- prior(normal(0,5)) + prior(normal(0, 10), "meanme") + prior(cauchy(0, 5), "sdme", coef = "mez") + prior(lkj(2), "corme") scode <- make_stancode( y ~ me(x, xsd)*me(z, zsd)*x, data = dat, prior = me_prior, sample_prior = "yes" ) expect_match2(scode, "(bsp[1]) * Xme_1[n] + (bsp[2]) * Xme_2[n] + (bsp[3]) * Xme_1[n] * Xme_2[n]" ) expect_match2(scode, "(bsp[6]) * Xme_1[n] * Xme_2[n] * Csp_3[n]") expect_match2(scode, "target += normal_lpdf(Xn_2 | Xme_2, noise_2)") expect_match2(scode, "target += normal_lpdf(bsp | 0, 5)") expect_match2(scode, "target += std_normal_lpdf(to_vector(zme_1))") expect_match2(scode, "target += normal_lpdf(meanme_1 | 0, 10)") expect_match2(scode, "target += cauchy_lpdf(sdme_1[2] | 0, 5)") expect_match2(scode, "target += lkj_corr_cholesky_lpdf(Lme_1 | 2)") expect_match2(scode, "+ transpose(diag_pre_multiply(sdme_1, Lme_1) * zme_1)") expect_match2(scode, "corme_1[choose(k - 1, 2) + j] = Corme_1[j, k];") scode <- make_stancode( y ~ me(x, xsd)*z + (me(x, xsd)*z | ID), data = dat ) expect_match2(scode, "(bsp[1] + r_1_3[J_1[n]]) * Xme_1[n]") expect_match2(scode, "(bsp[2] + r_1_4[J_1[n]]) * Xme_1[n] * Csp_1[n]") expect_match2(make_stancode(y ~ I(me(x, xsd)^2), data = dat), "(bsp[1]) * (Xme_1[n]^2)") # test that noise-free variables are unique across model parts scode <- make_stancode( bf(y ~ me(x, xsd)*me(z, zsd)*x, sigma ~ me(x, xsd)), data = dat, prior = prior(normal(0,5)) ) expect_match2(scode, "mu[n] += (bsp[1]) * Xme_1[n]") expect_match2(scode, "sigma[n] += (bsp_sigma[1]) * Xme_1[n]") scode <- make_stancode( bf(y ~ a * b, a + b ~ me(x, xsd), nl = TRUE), data = dat, prior = prior(normal(0,5), nlpar = a) + prior(normal(0, 5), nlpar = b) ) expect_match2(scode, "nlp_a[n] += (bsp_a[1]) * Xme_1[n]") expect_match2(scode, "nlp_b[n] += (bsp_b[1]) * Xme_1[n]") bform <- bf(mvbind(y, z) ~ me(x, xsd)) + set_rescor(TRUE) + set_mecor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "mu_y[n] += (bsp_y[1]) * Xme_1[n]") expect_match2(scode, "mu_z[n] += (bsp_z[1]) * Xme_1[n]") expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") # noise-free terms with grouping factors bform <- bf(y ~ me(x, xsd, ID) + me(z, xsd) + (me(x, xsd, ID) | ID)) scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nme_1] Xn_1;") expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") expect_match2(scode, "Xme_2 = meanme_2[1] + sdme_2[1] * zme_2;") expect_match2(scode, "(bsp[1] + r_1_2[J_1[n]]) * Xme_1[Jme_1[n]]") bform <- bform + set_mecor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "Xme_1 = meanme_1[1] + sdme_1[1] * zme_1;") }) test_that("Stan code of multi-membership models is correct", { dat <- data.frame(y = rnorm(10), g1 = sample(1:10, 10, TRUE), g2 = sample(1:10, 10, TRUE), w1 = rep(1, 10), w2 = rep(abs(rnorm(10)))) expect_match2(make_stancode(y ~ (1|mm(g1, g2)), data = dat), paste0(" W_1_1[n] * r_1_1[J_1_1[n]] * Z_1_1_1[n]", " + W_1_2[n] * r_1_1[J_1_2[n]] * Z_1_1_2[n]") ) expect_match2(make_stancode(y ~ (1+w1|mm(g1,g2)), data = dat), paste0(" W_1_1[n] * r_1_2[J_1_1[n]] * Z_1_2_1[n]", " + W_1_2[n] * r_1_2[J_1_2[n]] * Z_1_2_2[n]") ) expect_match2(make_stancode(y ~ (1+mmc(w1, w2)|mm(g1,g2)), data = dat), " W_1_2[n] * r_1_2[J_1_2[n]] * Z_1_2_2[n];" ) }) test_that("by variables in grouping terms are handled correctly", { dat <- data.frame( y = rnorm(100), x = 
rnorm(100), g = rep(1:10, each = 10), z = factor(rep(c(0, 4.5, 3, 2, 5), each = 20)) ) scode <- make_stancode(y ~ x + (1 | gr(g, by = z)), dat) expect_match2(scode, "r_1_1 = (transpose(sd_1[1, Jby_1]) .* (z_1[1]));") scode <- make_stancode(y ~ x + (x | gr(g, by = z)), dat) expect_match2(scode, "r_1 = scale_r_cor_by(z_1, sd_1, L_1, Jby_1);") expect_match2(scode, "target += student_t_lpdf(to_vector(sd_1) | 3, 0, 2.5);") expect_match2(scode, "target += lkj_corr_cholesky_lpdf(L_1[5] | 1);") }) test_that("Group syntax | and || is handled correctly,", { data <- data.frame(y = rnorm(10), x = rnorm(10), g1 = rep(1:5, each = 2), g2 = rep(1:2, 5)) scode <- make_stancode(y ~ x + (1+x||g1) + (I(x/4)|g2), data) expect_match2(scode, "r_1_2 = (sd_1[2] * (z_1[2]));") expect_match2(scode, "r_2_1 = r_2[, 1];") expect_match2(scode, "r_2 = scale_r_cor(z_2, sd_2, L_2);") }) test_that("predicting zi and hu works correctly", { scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = "zero_inflated_poisson") expect_match2(scode, "target += zero_inflated_poisson_log_logit_lpmf(Y[n] | mu[n], zi[n])" ) expect_true(!grepl("inv_logit\\(", scode)) expect_true(!grepl("exp(mu[n])", scode, fixed = TRUE)) scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = zero_inflated_poisson(identity)) expect_match2(scode, "target += zero_inflated_poisson_logit_lpmf(Y[n] | mu[n], zi[n])" ) scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = "zero_inflated_binomial") expect_match2(scode, "target += zero_inflated_binomial_blogit_logit_lpmf(Y[n] | trials[n], mu[n], zi[n])" ) expect_true(!grepl("inv_logit\\(", scode)) fam <- zero_inflated_binomial("probit", link_zi = "identity") scode <- make_stancode(bf(count ~ Trt, zi ~ Trt), epilepsy, family = fam) expect_match2(scode, "target += zero_inflated_binomial_lpmf(Y[n] | trials[n], mu[n], zi[n])" ) expect_match2(scode, "mu[n] = Phi(mu[n]);") scode <- make_stancode( bf(count ~ Trt, zi ~ Trt), epilepsy, family = zero_inflated_beta() ) expect_match2(scode, "target += zero_inflated_beta_logit_lpdf(Y[n] | mu[n], phi, zi[n])" ) scode <- make_stancode(bf(count ~ Trt, hu ~ Trt), epilepsy, family = "hurdle_negbinomial") expect_match2(scode, "target += hurdle_neg_binomial_log_logit_lpmf(Y[n] | mu[n], shape, hu[n])" ) expect_true(!grepl("inv_logit\\(", scode)) expect_true(!grepl("exp(mu[n])", scode, fixed = TRUE)) scode <- make_stancode(bf(count ~ Trt, hu ~ Trt), epilepsy, family = "hurdle_gamma") expect_match2(scode, "target += hurdle_gamma_logit_lpdf(Y[n] | shape, mu[n], hu[n])" ) expect_true(!grepl("inv_logit\\(", scode)) expect_match2(scode, "mu[n] = shape * exp(-(mu[n]));") scode <- make_stancode( bf(count ~ Trt, hu ~ Trt), epilepsy, family = hurdle_gamma(link_hu = "identity") ) expect_match2(scode, "target += hurdle_gamma_lpdf(Y[n] | shape, mu[n], hu[n])") expect_true(!grepl("inv_logit\\(", scode)) expect_match2(scode, "mu[n] = shape * exp(-(mu[n]));") }) test_that("fixing auxiliary parameters is possible", { scode <- make_stancode(bf(y ~ 1, sigma = 0.5), data = list(y = rnorm(10))) expect_match2(scode, "real sigma = 0.5;") }) test_that("Stan code of quantile regression models is correct", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x, data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lpdf(Y[n] | mu[n], sigma, quantile)") scode <- make_stancode(bf(y ~ x, quantile = 0.75), data, family = asym_laplace()) expect_match2(scode, "real quantile = 0.75;") scode <- make_stancode(y | cens(c) ~ x, 
data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lccdf(Y[n] | mu[n], sigma, quantile)") scode <- make_stancode(bf(y ~ x, sigma ~ x), data, family = asym_laplace()) expect_match2(scode, "target += asym_laplace_lpdf(Y[n] | mu[n], sigma[n], quantile)") scode <- make_stancode(bf(y ~ x, quantile = 0.75), data, family = brmsfamily("zero_inflated_asym_laplace")) expect_match2(scode, "target += zero_inflated_asym_laplace_lpdf(Y[n] | mu[n], sigma, quantile, zi)" ) }) test_that("Stan code of addition term 'rate' is correct", { data <- data.frame(y = rpois(10, 1), x = rnorm(10), time = 1:10) scode <- make_stancode(y | rate(time) ~ x, data, poisson()) expect_match2(scode, "target += poisson_log_lpmf(Y | mu + log_denom);") scode <- make_stancode(y | rate(time) ~ x, data, poisson("identity")) expect_match2(scode, "target += poisson_lpmf(Y | mu .* denom);") scode <- make_stancode(y | rate(time) ~ x, data, negbinomial()) expect_match2(scode, "target += neg_binomial_2_log_lpmf(Y | mu + log_denom, shape * denom);") scode <- make_stancode(y | rate(time) ~ x, data, brmsfamily("negbinomial2")) expect_match2(scode, "target += neg_binomial_2_log_lpmf(Y | mu + log_denom, inv(sigma) * denom);") scode <- make_stancode(y | rate(time) + cens(1) ~ x, data, geometric()) expect_match2(scode, "target += neg_binomial_2_lpmf(Y[n] | mu[n] * denom[n], 1 * denom[n]);") }) test_that("Stan code of GEV models is correct", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x, data, gen_extreme_value()) expect_match2(scode, "target += gen_extreme_value_lpdf(Y[n] | mu[n], sigma, xi)") expect_match2(scode, "xi = scale_xi(tmp_xi, Y, mu, sigma)") scode <- make_stancode(bf(y ~ x, sigma ~ x), data, gen_extreme_value()) expect_match2(scode, "xi = scale_xi_vector(tmp_xi, Y, mu, sigma)") scode <- make_stancode(bf(y ~ x, xi ~ x), data, gen_extreme_value()) expect_match2(scode, "xi[n] = expm1(xi[n])") scode <- make_stancode(bf(y ~ x, xi = 0), data, gen_extreme_value()) expect_match2(scode, "real xi = 0; // shape parameter") scode <- make_stancode(y | cens(c) ~ x, data, gen_extreme_value()) expect_match2(scode, "target += gen_extreme_value_lccdf(Y[n] | mu[n], sigma, xi)") }) test_that("Stan code of Cox models is correct", { data <- data.frame(y = rexp(100), ce = sample(0:1, 100, TRUE), x = rnorm(100)) bform <- bf(y | cens(ce) ~ x) scode <- make_stancode(bform, data, brmsfamily("cox")) expect_match2(scode, "target += cox_log_lpdf(Y[n] | mu[n], bhaz[n], cbhaz[n]);") expect_match2(scode, "vector[N] cbhaz = Zcbhaz * sbhaz;") expect_match2(scode, "target += dirichlet_lpdf(sbhaz | con_sbhaz);") expect_match2(scode, "simplex[Kbhaz] sbhaz;") scode <- make_stancode(bform, data, brmsfamily("cox", "identity")) expect_match2(scode, "target += cox_lccdf(Y[n] | mu[n], bhaz[n], cbhaz[n]);") }) test_that("offsets appear in the Stan code", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) scode <- make_stancode(y ~ x + offset(c), data) expect_match2(scode, "+ offsets;") scode <- make_stancode(bf(y ~ a, a ~ offset(log(c + 1)), nl = TRUE), data, prior = prior(normal(0,1), nlpar = a)) expect_match2(scode, "+ offsets_a;") }) test_that("prior only models are correctly checked", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) prior <- prior(normal(0, 5), b) + prior("", Intercept) expect_error(make_stancode(y ~ x, data, prior = prior, sample_prior = "only"), "Sampling from priors is not possible") prior <- prior(normal(0, 5), b) + prior(normal(0, 10), Intercept) scode <- make_stancode(y ~ 
x, data, prior = prior, sample_prior = "only") expect_match2(scode, "target += normal_lpdf(Intercept | 0, 10)") }) test_that("Stan code of mixture model is correct", { data <- data.frame(y = 1:10, x = rnorm(10), c = 1) scode <- make_stancode( bf(y ~ x, sigma2 ~ x), data, family = mixture(gaussian, gaussian), sample_prior = TRUE ) expect_match2(scode, "ordered[2] ordered_Intercept;") expect_match2(scode, "Intercept_mu2 = ordered_Intercept[2];") expect_match2(scode, "target += dirichlet_lpdf(theta | con_theta);") expect_match2(scode, "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], sigma1);") expect_match2(scode, "ps[2] = log(theta2) + normal_lpdf(Y[n] | mu2[n], sigma2[n]);") expect_match2(scode, "target += log_sum_exp(ps);") expect_match2(scode, "simplex[2] prior_theta = dirichlet_rng(con_theta);") data$z <- abs(data$y) scode <- make_stancode(bf(z | weights(c) ~ x, shape1 ~ x, theta1 = 1, theta2 = 2), data = data, mixture(Gamma("log"), weibull)) expect_match(scode, "data \\{[^\\}]*real theta1;") expect_match(scode, "data \\{[^\\}]*real theta2;") expect_match2(scode, "ps[1] = log(theta1) + gamma_lpdf(Y[n] | shape1[n], mu1[n]);") expect_match2(scode, "target += weights[n] * log_sum_exp(ps);") scode <- make_stancode(bf(abs(y) | se(c) ~ x), data = data, mixture(gaussian, student)) expect_match2(scode, "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], se[n]);") expect_match2(scode, "ps[2] = log(theta2) + student_t_lpdf(Y[n] | nu2, mu2[n], se[n]);") fam <- mixture(gaussian, student, exgaussian) scode <- make_stancode(bf(y ~ x), data = data, family = fam) expect_match(scode, "parameters \\{[^\\}]*real Intercept_mu3;") expect_match2(scode, "ps[2] = log(theta2) + student_t_lpdf(Y[n] | nu2, mu2[n], sigma2);" ) expect_match2(scode, "ps[3] = log(theta3) + exp_mod_normal_lpdf(Y[n] | mu3[n] - beta3, sigma3, inv(beta3));" ) scode <- make_stancode(bf(y ~ x, theta1 ~ x, theta3 ~ x), data = data, family = fam) expect_match2(scode, "log_sum_exp_theta = log(exp(theta1[n]) + exp(theta2[n]) + exp(theta3[n]));") expect_match2(scode, "theta2 = rep_vector(0.0, N);") expect_match2(scode, "theta3[n] = theta3[n] - log_sum_exp_theta;") expect_match2(scode, "ps[1] = theta1[n] + normal_lpdf(Y[n] | mu1[n], sigma1);") fam <- mixture(cumulative, sratio) scode <- make_stancode(y ~ x, data, family = fam) expect_match2(scode, "ordered_logistic_lpmf(Y[n] | mu1[n], Intercept_mu1);") expect_match2(scode, "sratio_logit_lpmf(Y[n] | mu2[n], disc2, Intercept_mu2);") # censored mixture model fam <- mixture(gaussian, gaussian) scode <- make_stancode(y | cens(2, y2 = 2) ~ x, data, fam) expect_match2(scode, "ps[2] = log(theta2) + normal_lccdf(Y[n] | mu2[n], sigma2);" ) expect_match2(scode, paste0( "ps[2] = log(theta2) + log_diff_exp(\n", " normal_lcdf(rcens[n] | mu2[n], sigma2)," )) # truncated mixture model scode <- make_stancode(y | trunc(3) ~ x, data, fam) expect_match2(scode, paste0( "ps[1] = log(theta1) + normal_lpdf(Y[n] | mu1[n], sigma1) -\n", " normal_lccdf(lb[n] | mu1[n], sigma1);" )) # non-linear mixture model bform <- bf(y ~ 1) + nlf(mu1 ~ eta^2) + nlf(mu2 ~ log(eta) + a) + lf(eta + a ~ x) + mixture(gaussian, nmix = 2) bprior <- prior(normal(0, 1), nlpar = "eta") + prior(normal(0, 1), nlpar = "a") scode <- make_stancode(bform, data = data, prior = bprior) expect_match2(scode, "mu1[n] = nlp_eta[n] ^ 2;") expect_match2(scode, "mu2[n] = log(nlp_eta[n]) + nlp_a[n];") }) test_that("sparse matrix multiplication is applied correctly", { data <- data.frame(y = rnorm(10), x = rnorm(10)) # linear model scode <- make_stancode( bf(y ~ x, 
sparse = TRUE) + lf(sigma ~ x, sparse = TRUE), data, prior = prior(normal(0, 5), coef = "Intercept") ) expect_match2(scode, "wX = csr_extract_w(X);") expect_match2(scode, "mu = csr_matrix_times_vector(rows(X), cols(X), wX, vX, uX, b);" ) expect_match2(scode, "uX_sigma[size(csr_extract_u(X_sigma))] = csr_extract_u(X_sigma);" ) expect_match2(scode, paste0( "sigma = csr_matrix_times_vector(rows(X_sigma), cols(X_sigma), ", "wX_sigma, vX_sigma, uX_sigma, b_sigma);" ) ) expect_match2(scode, "target += normal_lpdf(b[1] | 0, 5);") expect_match2(scode, "target += normal_lpdf(Y | mu, sigma);") # non-linear model scode <- make_stancode( bf(y ~ a, lf(a ~ x, sparse = TRUE), nl = TRUE), data, prior = prior(normal(0, 1), nlpar = a) ) expect_match2(scode, "vX_a[size(csr_extract_v(X_a))] = csr_extract_v(X_a);" ) expect_match2(scode, "nlp_a = csr_matrix_times_vector(rows(X_a), cols(X_a), wX_a, vX_a, uX_a, b_a);" ) }) test_that("QR decomposition is included in the Stan code", { data <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10)) bform <- bf(y ~ x1 + x2, decomp = "QR") + lf(sigma ~ 0 + x1 + x2, decomp = "QR") # simple priors scode <- make_stancode(bform, data, prior = prior(normal(0, 2))) expect_match2(scode, "XQ = qr_thin_Q(Xc) * sqrt(N - 1);") expect_match2(scode, "b = XR_inv * bQ;") expect_match2(scode, "target += normal_lpdf(bQ | 0, 2);") expect_match2(scode, "XQ * bQ") expect_match2(scode, "XR_sigma = qr_thin_R(X_sigma) / sqrt(N - 1);") # horseshoe prior scode <- make_stancode(bform, data, prior = prior(horseshoe(1))) expect_match2(scode, "target += std_normal_lpdf(zb);") expect_match2(scode, "bQ = horseshoe(") }) test_that("Stan code for Gaussian processes is correct", { set.seed(1234) dat <- data.frame(y = rnorm(40), x1 = rnorm(40), x2 = rnorm(40), z = factor(rep(3:6, each = 10))) prior <- prior(gamma(0.1, 0.1), sdgp) scode <- make_stancode(y ~ gp(x1) + gp(x2, by = x1, gr = FALSE), dat, prior = prior) expect_match2(scode, "target += inv_gamma_lpdf(lscale_1[1]") expect_match2(scode, "target += gamma_lpdf(sdgp_1 | 0.1, 0.1)") expect_match2(scode, "gp_pred_2 = gp(Xgp_2, sdgp_2[1], lscale_2[1], zgp_2);") expect_match2(scode, "Cgp_2 .* gp_pred_2;") prior <- prior + prior(normal(0, 1), lscale, coef = gpx1) scode <- make_stancode(y ~ gp(x1) + gp(x2, by = x1, gr = TRUE), data = dat, prior = prior) expect_match2(scode, "target += normal_lpdf(lscale_1[1][1] | 0, 1)") expect_match2(scode, "gp_pred_2 = gp(Xgp_2, sdgp_2[1], lscale_2[1], zgp_2);") expect_match2(scode, "+ Cgp_2 .* gp_pred_2[Jgp_2]") # non-isotropic GP scode <- make_stancode(y ~ gp(x1, x2, by = z, iso = FALSE), data = dat) expect_match2(scode, "target += inv_gamma_lpdf(lscale_1[1][2]") expect_match2(scode, "target += inv_gamma_lpdf(lscale_1[4][2]") # Suppress Stan parser warnings that can currently not be avoided scode <- make_stancode(y ~ gp(x1, x2) + gp(x1, by = z, gr = FALSE), dat, silent = TRUE) expect_match2(scode, "gp(Xgp_1, sdgp_1[1], lscale_1[1], zgp_1)") expect_match2(scode, "mu[Igp_2_2] += Cgp_2_2 .* gp_pred_2_2;") # approximate GPS scode <- make_stancode( y ~ gp(x1, k = 10, c = 5/4) + gp(x2, by = x1, k = 10, c = 5/4), data = dat ) expect_match2(scode, "target += inv_gamma_lpdf(lscale_1") expect_match2(scode, "rgp_1 = sqrt(spd_cov_exp_quad(slambda_1, sdgp_1[1], lscale_1[1])) .* zgp_1;" ) expect_match2(scode, "Cgp_2 .* gp_pred_2[Jgp_2]") prior <- c(prior(normal(0, 10), lscale, coef = gpx1, nlpar = a), prior(gamma(0.1, 0.1), sdgp, nlpar = a), prior(normal(0, 1), b, nlpar = a)) scode <- make_stancode(bf(y ~ a, a ~ gp(x1), nl = 
TRUE), data = dat, prior = prior) expect_match2(scode, "target += normal_lpdf(lscale_a_1[1][1] | 0, 10)") expect_match2(scode, "target += gamma_lpdf(sdgp_a_1 | 0.1, 0.1)") expect_match2(scode, "gp(Xgp_a_1, sdgp_a_1[1], lscale_a_1[1], zgp_a_1)") prior <- prior(gamma(2, 2), lscale, coef = gpx1z5, nlpar = "a") scode <- make_stancode(bf(y ~ a, a ~ gp(x1, by = z, gr = TRUE), nl = TRUE), data = dat, prior = prior, silent = TRUE) expect_match2(scode, "nlp_a[Igp_a_1_1] += Cgp_a_1_1 .* gp_pred_a_1_1[Jgp_a_1_1];" ) expect_match2(scode, "gp(Xgp_a_1_3, sdgp_a_1[3], lscale_a_1[3], zgp_a_1_3)") expect_match2(scode, "target += gamma_lpdf(lscale_a_1[3][1] | 2, 2);") expect_match2(scode, "target += std_normal_lpdf(zgp_a_1_3);") # test warnings prior <- prior(normal(0, 1), lscale) expect_warning( make_stancode(y ~ gp(x1), data = dat, prior = prior), "The global prior 'normal(0, 1)' of class 'lscale' will not be used", fixed = TRUE ) }) test_that("Stan code for SAR models is correct", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) W <- matrix(0, nrow = 10, ncol = 10) dat2 <- list(W = W) scode <- make_stancode( y ~ x + sar(W), data = dat, prior = prior(normal(0.5, 1), lagsar), data2 = dat2 ) expect_match2(scode, "target += normal_lagsar_lpdf(Y | mu, sigma, lagsar, Msar, eigenMsar)" ) expect_match2(scode, "target += normal_lpdf(lagsar | 0.5, 1)") scode <- make_stancode( y ~ x + sar(W, type = "lag"), data = dat, family = student(), data2 = dat2 ) expect_match2(scode, "target += student_t_lagsar_lpdf(Y | nu, mu, sigma, lagsar, Msar, eigenMsar)" ) scode <- make_stancode(y ~ x + sar(W, type = "error"), data = dat, data2 = dat2) expect_match2(scode, "target += normal_errorsar_lpdf(Y | mu, sigma, errorsar, Msar, eigenMsar)" ) scode <- make_stancode( y ~ x + sar(W, "error"), data = dat, family = student(), prior = prior(beta(2, 3), errorsar), data2 = dat2 ) expect_match2(scode, "target += student_t_errorsar_lpdf(Y | nu, mu, sigma, errorsar, Msar, eigenMsar)" ) expect_match2(scode, "target += beta_lpdf(errorsar | 2, 3)") expect_error( make_stancode(bf(y ~ sar(W), sigma ~ x), data = dat), "SAR models are not implemented when predicting 'sigma'" ) }) test_that("Stan code for CAR models is correct", { dat = data.frame(y = rnorm(10), x = rnorm(10)) edges <- cbind(1:10, 10:1) W <- matrix(0, nrow = 10, ncol = 10) for (i in seq_len(nrow(edges))) { W[edges[i, 1], edges[i, 2]] <- 1 } rownames(W) <- seq_len(nrow(W)) dat2 <- list(W = W) scode <- make_stancode(y ~ x + car(W), dat, data2 = dat2) expect_match2(scode, "real sparse_car_lpdf(vector phi") expect_match2(scode, "target += sparse_car_lpdf(") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") scode <- make_stancode(y ~ x + car(W, type = "esicar"), dat, data2 = dat2) expect_match2(scode, "real sparse_icar_lpdf(vector phi") expect_match2(scode, "target += sparse_icar_lpdf(") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, "rcar[Nloc] = - sum(zcar)") scode <- make_stancode(y ~ x + car(W, type = "icar"), dat, data2 = dat2) expect_match2(scode, "target += -0.5 * dot_self(zcar[edges1] - zcar[edges2])") expect_match2(scode, "target += normal_lpdf(sum(zcar) | 0, 0.001 * Nloc)") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, "rcar = zcar * sdcar") scode <- make_stancode(y ~ x + car(W, type = "bym2"), dat, data2 = dat2) expect_match2(scode, "target += -0.5 * dot_self(zcar[edges1] - zcar[edges2])") expect_match2(scode, "target += normal_lpdf(sum(zcar) | 0, 0.001 * Nloc)") expect_match2(scode, "mu[n] += rcar[Jloc[n]]") expect_match2(scode, 
"target += beta_lpdf(rhocar | 1, 1)") expect_match2(scode, paste0( "rcar = (sqrt(1 - rhocar) * nszcar + ", "sqrt(rhocar * inv(car_scale)) * zcar) * sdcar" )) # apply a CAR term on a distributional parameter other than 'mu' scode <- make_stancode(bf(y ~ x, sigma ~ car(W)), dat, data2 = dat2) expect_match2(scode, "real sparse_car_lpdf(vector phi") expect_match2(scode, "target += sparse_car_lpdf(") expect_match2(scode, "sigma[n] += rcar_sigma[Jloc_sigma[n]]") }) test_that("Stan code for skew_normal models is correct", { dat = data.frame(y = rnorm(10), x = rnorm(10)) scode <- make_stancode(y ~ x, dat, skew_normal()) expect_match2(scode, "delta = alpha / sqrt(1 + alpha^2);") expect_match2(scode, "omega = sigma / sqrt(1 - sqrt(2 / pi())^2 * delta^2);") expect_match2(scode, "mu[n] = mu[n] - omega * delta * sqrt(2 / pi());") scode <- make_stancode(bf(y ~ x, sigma ~ x), dat, skew_normal()) expect_match2(scode, "omega[n] = sigma[n] / sqrt(1 - sqrt(2 / pi())^2 * delta^2);") expect_match2(scode, "mu[n] = mu[n] - omega[n] * delta * sqrt(2 / pi());") scode <- make_stancode(bf(y | se(x) ~ x, alpha ~ x), dat, skew_normal()) expect_match2(scode, "delta[n] = alpha[n] / sqrt(1 + alpha[n]^2);") expect_match2(scode, "omega[n] = se[n] / sqrt(1 - sqrt(2 / pi())^2 * delta[n]^2);") expect_match2(scode, "mu[n] = mu[n] - omega[n] * delta[n] * sqrt(2 / pi());") scode <- make_stancode(y ~ x, dat, mixture(skew_normal, nmix = 2)) expect_match2(scode, "omega1 = sigma1 / sqrt(1 - sqrt(2 / pi())^2 * delta1^2);") expect_match2(scode, "mu2[n] = mu2[n] - omega2 * delta2 * sqrt(2 / pi());") }) test_that("Stan code for missing value terms works correctly", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10, z = 1) dat$x[c(1, 3, 9)] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi() ~ g) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "Yl_x[Jmi_x] = Ymi_x;") expect_match2(scode, "(bsp_y[1]) * Yl_x[n] + (bsp_y[2]) * Yl_x[n] * Csp_y_1[n];") expect_match2(scode, "target += normal_lpdf(Yl_x | mu_x, sigma_x);") bform <- bf(y ~ mi(x) + (mi(x) | g)) + bf(x | mi() ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[1] + r_1_y_2[J_1_y[n]]) * Yl_x[n] + r_1_y_1[J_1_y[n]] * Z_1_y_1[n];" ) bform <- bf(y ~ a, a ~ mi(x), nl = TRUE) + bf(x | mi() ~ 1) + set_rescor(FALSE) bprior <- prior(normal(0, 1), nlpar = "a", resp = "y") scode <- make_stancode(bform, dat, prior = bprior) expect_match2(scode, "nlp_y_a[n] += (bsp_y_a[1]) * Yl_x[n];") expect_match2(scode, "target += normal_lpdf(bsp_y_a | 0, 1);") bform <- bf(y ~ mi(x)*mo(g)) + bf(x | mi() ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[3]) * Yl_x[n] * mo(simo_y_2, Xmo_y_2[n]);") bform <- bf(y ~ 1, sigma ~ 1) + bf(x | mi() ~ 1) + set_rescor(TRUE) scode <- make_stancode(bform, dat) expect_match2(scode, "Yl[n][2] = Yl_x[n];") expect_match2(scode, "sigma[n] = transpose([sigma_y[n], sigma_x]);") expect_match2(scode, "LSigma[n] = diag_pre_multiply(sigma[n], Lrescor);") bform <- bf(x | mi() ~ y, family = "lognormal") scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nmi] Ymi;") bform <- bf(y ~ I(log(mi(x))) * g) + bf(x | mi() + trunc(lb = 1) ~ y, family = "lognormal") scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nmi_x] Ymi_x;") expect_match2(scode, "(bsp_y[1]) * (log(Yl_x[n])) + (bsp_y[2]) * (log(Yl_x[n])) * Csp_y_1[n]" ) bform <- bf(y ~ mi(x)*g) + bf(x | mi() + cens(z) ~ y, family = "beta") scode <- make_stancode(bform, dat) expect_match2(scode, "vector[Nmi_x] 
Ymi_x;") expect_match2(scode, "target += beta_lpdf(Yl_x[n] | mu_x[n] * phi_x, (1 - mu_x[n]) * phi_x);" ) bform <- bf(y | mi() ~ mi(x), shape ~ mi(x), family=weibull()) + bf(x| mi() ~ z, family=gaussian()) + set_rescor(FALSE) scode <- make_stancode(bform, data = dat) expect_match2(scode, "mu_y[n] = exp(mu_y[n]) / tgamma(1 + 1 / shape_y[n]);") expect_match2(scode, "shape_y[n] += (bsp_shape_y[1]) * Yl_x[n];") }) test_that("Stan code for overimputation works correctly", { dat = data.frame(y = rnorm(10), x_x = rnorm(10), g = 1:10, z = 1) dat$x[c(1, 3, 9)] <- NA bform <- bf(y ~ mi(x_x)*g) + bf(x_x | mi(g) ~ 1) + set_rescor(FALSE) scode <- make_stancode(bform, dat, sample_prior = "yes") expect_match2(scode, "target += normal_lpdf(Yl_xx | mu_xx, sigma_xx)") expect_match2(scode, "target += normal_lpdf(Y_xx[Jme_xx] | Yl_xx[Jme_xx], noise_xx[Jme_xx])" ) expect_match2(scode, "vector[N_xx] Yl_xx;") }) test_that("Missing value terms can be combined with 'subset'", { dat <- data.frame( y = rnorm(10), x = c(rnorm(9), NA), z = rnorm(10), g2 = 10:1, g1 = sample(1:5, 10, TRUE), s = c(FALSE, rep(TRUE, 9)) ) bform <- bf(y ~ mi(x, idx = g1)*mi(z)) + bf(x | mi() + index(g2) + subset(s) ~ 1) + bf(z | mi() ~ s) + set_rescor(FALSE) scode <- make_stancode(bform, dat) expect_match2(scode, "(bsp_y[1]) * Yl_x[idxl_y_x_1[n]]") expect_match2(scode, "(bsp_y[2]) * Yl_z[n]") expect_match2(scode, "(bsp_y[3]) * Yl_x[idxl_y_x_1[n]] * Yl_z[n]") expect_match2(scode, "int idxl_y_x_1[N_y];") }) test_that("Stan code for advanced count data distribution is correct", { scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = brmsfamily("discrete_weibull") ) expect_match2(scode, "mu[n] = inv_logit(mu[n]);") expect_match2(scode, "target += discrete_weibull_lpmf(Y[n] | mu[n], shape);") scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = brmsfamily("com_poisson") ) expect_match2(scode, "target += com_poisson_log_lpmf(Y[n] | mu[n], shape);") }) test_that("argument 'stanvars' is handled correctly", { bprior <- prior(normal(mean_intercept, 10), class = "Intercept") mean_intercept <- 5 stanvars <- stanvar(mean_intercept) scode <- make_stancode(count ~ Trt, data = epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "real mean_intercept;") # define a multi_normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = "vector[K] M;") + stanvar(diag(2), "V", scode = "matrix[K, K] V;") scode <- make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "vector[K] M;") expect_match2(scode, "matrix[K, K] V;") # define a hierarchical prior on the regression coefficients bprior <- set_prior("normal(0, tau)", class = "b") + set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) stanvars <- stanvar(scode = "real tau;", block = "parameters") scode <- make_stancode(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_match2(scode, "real tau;") expect_match2(scode, "target += normal_lpdf(b | 0, tau);") # ensure that variables are passed to the likelihood of a threaded model foo <- 0.5 stanvars <- stanvar(foo) + stanvar(scode = "real tau;", block = "parameters", pll_args = "real tau") scode <- make_stancode(count ~ 1, data = epilepsy, family = poisson(), stanvars = stanvars, threads = threading(2), parse = FALSE) expect_match2(scode, "partial_log_lik_lpmf(int[] seq, int start, int end, data int[] Y, real Intercept, data 
real foo, real tau)" ) expect_match2(scode, "reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y, Intercept, foo, tau)" ) # specify Stan code in the likelihood part of the model block stanvars <- stanvar(scode = "mu += 1.0;", block = "likelihood", position = "start") scode <- make_stancode(count ~ Trt + (1|patient), data = epilepsy, stanvars = stanvars) expect_match2(scode, "mu += 1.0;") stanvars <- stanvar(scode = "mu += 1.0;", block = "likelihood", position = "start") scode <- make_stancode(count ~ Trt + (1|patient), data = epilepsy, stanvars = stanvars, threads = 2, parse = FALSE) expect_match2(scode, "mu += 1.0;") # add transformation at the end of a block stanvars <- stanvar(scode = "r_1_1 = r_1_1 * 2;", block = "tparameters", position = "end") scode <- make_stancode(count ~ Trt + (1 | patient), epilepsy, stanvars = stanvars) expect_match2(scode, "r_1_1 = (sd_1[1] * (z_1[1]));\n r_1_1 = r_1_1 * 2;") # use the non-centered parameterization for 'b' # unofficial feature not supported anymore for the time being # bprior <- set_prior("target += normal_lpdf(zb | 0, 1)", check = FALSE) + # set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) # stanvars <- stanvar(scode = "vector[Kc] zb;", block = "parameters") + # stanvar(scode = "real tau;", block = "parameters") + # stanvar(scode = "vector[Kc] b = zb * tau;", # block="tparameters", name = "b") # scode <- make_stancode(count ~ Trt, epilepsy, # prior = bprior, stanvars = stanvars) # expect_match2(scode, "vector[Kc] b = zb * tau;") # stanvars <- stanvar(scode = "vector[Ksp] zbsp;", block = "parameters") + # stanvar(scode = "real tau;", block = "parameters") + # stanvar(scode = "vector[Ksp] bsp = zbsp * tau;", # block = "tparameters", name = "bsp") # scode <- make_stancode(count ~ mo(Base), epilepsy, stanvars = stanvars) # expect_match2(scode, "vector[Ksp] bsp = zbsp * tau;") }) test_that("custom families are handled correctly", { dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) # define a custom beta-binomial family log_lik_beta_binomial2 <- function(i, draws) { mu <- draws$dpars$mu[, i] tau <- draws$dpars$tau trials <- draws$data$vint1[i] y <- draws$data$Y[i] beta_binomial2_lpmf(y, mu, tau, trials) } posterior_predict_beta_binomial2 <- function(i, draws, ...) 
{ mu <- draws$dpars$mu[, i] tau <- draws$dpars$tau trials <- draws$data$vint1[i] beta_binomial2_rng(mu, tau, trials) } posterior_epred_beta_binomial2 <- function(draws) { mu <- draws$dpars$mu trials <- draws$data$vint1 trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE) mu * trials } beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1[n]", "vreal1[n]"), log_lik = log_lik_beta_binomial2, posterior_epred = posterior_epred_beta_binomial2, posterior_predict = posterior_predict_beta_binomial2 ) # define custom stan functions # real R is just to also test the vreal addition argument stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N, real R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int N, real R) { return beta_binomial_rng(N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_funs, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars ) expect_match2(scode, "int vint1[N];") expect_match2(scode, "real tau;") expect_match2(scode, "mu[n] = inv_logit(mu[n]);") expect_match2(scode, "target += gamma_lpdf(tau | 0.1, 0.1);") expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau, vint1[n], vreal1[n]);" ) scode <- make_stancode( bf(y | vint(size) + vreal(size) ~ x, tau ~ x), data = dat, family = beta_binomial2, stanvars = stanvars ) expect_match2(scode, "tau[n] = exp(tau[n]);") expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau[n], vint1[n], vreal1[n]);" ) # check custom families in mixture models scode <- make_stancode( y | vint(size) + vreal(size) + trials(size) ~ x, data = dat, family = mixture(binomial, beta_binomial2), stanvars = stanvars ) expect_match2(scode, "log(theta2) + beta_binomial2_lpmf(Y[n] | mu2[n], tau2, vint1[n], vreal1[n]);" ) # check custom families in multivariate models bform <- bf( y | vint(size) + vreal(size) + trials(size) ~ x, family = beta_binomial2 ) + bf(x ~ 1, family = gaussian()) scode <- make_stancode(bform, data = dat, stanvars = stanvars) expect_match2(scode, "target += beta_binomial2_lpmf(Y_y[n] | mu_y[n], tau_y, vint1_y[n], vreal1_y[n]);" ) # check vectorized custom families beta_binomial2_vec <- custom_family( "beta_binomial2_vec", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1", "vreal1"), loop = FALSE ) stan_funs_vec <- " real beta_binomial2_vec_lpmf(int[] y, vector mu, real phi, int[] N, real[] R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); } int beta_binomial2_rng(real mu, real phi, int N, real R) { return beta_binomial_rng(N, mu * phi, (1 - mu) * phi); } " stanvars <- stanvar(scode = stan_funs_vec, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2_vec, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars ) expect_match2(scode, "target += beta_binomial2_vec_lpmf(Y | mu, tau, vint1, vreal1);" ) }) test_that("likelihood of distributional beta models is correct", { # test issue #404 dat <- data.frame(prop = rbeta(100, shape1 = 2, shape2 = 2)) scode <- make_stancode( bf(prop ~ 1, phi ~ 1), data = dat, family = Beta() ) expect_match2(scode, "beta_lpdf(Y[n] | mu[n] * phi[n], (1 - mu[n]) * phi[n])") }) test_that("student-t group-level effects work without 
errors", { scode <- make_stancode(count ~ Trt + (1|gr(patient, dist = "st")), epilepsy) expect_match2(scode, "dfm_1 = sqrt(df_1 * udf_1);") expect_match2(scode, "dfm_1 .* (sd_1[1] * (z_1[1]));") expect_match2(scode, "target += gamma_lpdf(df_1 | 2, 0.1);") expect_match2(scode, "target += inv_chi_square_lpdf(udf_1 | df_1);") bprior <- prior(normal(20, 5), class = df, group = patient) scode <- make_stancode( count ~ Trt + (Trt|gr(patient, dist = "st")), epilepsy, prior = bprior ) expect_match2(scode, "r_1 = rep_matrix(dfm_1, M_1) .* scale_r_cor(z_1, sd_1, L_1);" ) expect_match2(scode, "target += normal_lpdf(df_1 | 20, 5);") }) test_that("centering design matrices can be changed correctly", { dat <- data.frame(y = 1:10, x = 1:10) scode <- make_stancode( bf(y ~ x, center = FALSE), data = dat, family = weibull(), prior = prior(normal(0,1), coef = Intercept) ) expect_match2(scode, "mu = X * b;") expect_match2(scode, "target += normal_lpdf(b[1] | 0, 1);") bform <- bf(y ~ eta, nl = TRUE) + lf(eta ~ x, center = TRUE) scode <- make_stancode(bform, data = dat) expect_match2(scode, "nlp_eta = Intercept_eta + Xc_eta * b_eta;") }) test_that("to_vector() is correctly removed from prior of SD parameters", { # see https://discourse.mc-stan.org/t/prior-for-sd-generate-parsing-text-error/12292/5 dat <- data.frame( y = rnorm(100), ID = 1:10, group = rep(1:2, each = 5) ) bform <- bf( y ~ 1 + (1 | p | gr(ID, by=group)), sigma ~ 1 + (1 | p | gr(ID, by=group)) ) bprior <- c( prior(normal(0, 0.1), class = sd) , prior(normal(0, 0.01), class = sd, dpar = sigma) ) scode <- make_stancode( bform, data = dat, prior = bprior, sample_prior = TRUE ) expect_match2(scode, "prior_sd_1_1 = normal_rng(0,0.1);") expect_match2(scode, "prior_sd_1_2 = normal_rng(0,0.01);") }) test_that("threaded Stan code is correct", { dat <- data.frame( count = rpois(236, lambda = 20), visit = rep(1:4, each = 59), patient = factor(rep(1:59, 4)), Age = rnorm(236), Trt = factor(sample(0:1, 236, TRUE)), AgeSD = abs(rnorm(236, 1)), Exp = sample(1:5, 236, TRUE), volume = rnorm(236), gender = factor(c(rep("m", 30), rep("f", 29))) ) # only parse models if cmdstan can be found on the system cmdstan_version <- try(cmdstanr::cmdstan_version(), silent = TRUE) found_cmdstan <- !is(cmdstan_version, "try-error") options( brms.parse_stancode = found_cmdstan && not_cran, brms.backend = "cmdstanr" ) threads <- threading(2, grainsize = 20) bform <- bf( count ~ Trt*Age + mo(Exp) + s(Age) + offset(Age) + (1+Trt|visit), sigma ~ Trt + gp(Age) + gp(volume, by = Trt) ) scode <- make_stancode(bform, dat, family = student(), threads = threads) expect_match2(scode, "real partial_log_lik_lpmf(int[] seq, int start,") expect_match2(scode, "mu[n] += (bsp[1]) * mo(simo_1, Xmo_1[nn])") expect_match2(scode, "ptarget += student_t_lpdf(Y[start:end] | nu, mu, sigma);") expect_match2(scode, "+ gp_pred_sigma_1[Jgp_sigma_1[start:end]]") expect_match2(scode, ".* gp_pred_sigma_2_1[Jgp_sigma_2_1[which_gp_sigma_2_1]];") expect_match2(scode, "sigma[start_at_one(Igp_sigma_2_2[which_gp_sigma_2_2], start)] +=") expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y,") scode <- make_stancode( visit ~ cs(Trt) + Age, dat, family = sratio(), threads = threads, ) expect_match2(scode, "matrix[N, nthres] mucs = Xcs[start:end] * bcs;") expect_match2(scode, "ptarget += sratio_logit_lpmf(Y[nn] | mu[n], disc, Intercept - transpose(mucs[n]));" ) scode <- make_stancode( bf(visit ~ a * Trt ^ b, a ~ mo(Exp), b ~ s(Age), nl = TRUE), data = dat, family = Gamma("log"), prior = 
set_prior("normal(0, 1)", nlpar = c("a", "b")), threads = threads ) expect_match2(scode, "mu[n] = shape * exp(-(nlp_a[n] * C_1[nn] ^ nlp_b[n]));") expect_match2(scode, "ptarget += gamma_lpdf(Y[start:end] | shape, mu);") bform <- bf(mvbind(count, Exp) ~ Trt) + set_rescor(TRUE) scode <- make_stancode(bform, dat, gaussian(), threads = threads) expect_match2(scode, "ptarget += multi_normal_cholesky_lpdf(Y[start:end] | Mu, LSigma);") bform <- bf(brms::mvbind(count, Exp) ~ Trt) + set_rescor(FALSE) scode <- make_stancode(bform, dat, gaussian(), threads = threads) expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf_count, seq_count,") expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf_Exp, seq_Exp,") expect_match2(scode, "ptarget += normal_id_glm_lpdf(Y_Exp[start:end] | Xc_Exp[start:end], Intercept_Exp, b_Exp, sigma_Exp);" ) scode <- make_stancode( visit ~ Trt, dat, family = mixture(poisson(), nmix = 2), threads = threading(4, grainsize = 10, static = TRUE) ) expect_match2(scode, "ps[1] = log(theta1) + poisson_log_lpmf(Y[nn] | mu1[n]);") expect_match2(scode, "ptarget += log_sum_exp(ps);") expect_match2(scode, "target += reduce_sum_static(partial_log_lik_lpmf,") }) test_that("Un-normalized Stan code is correct", { # only parse models if cmdstan >= 2.25 can be found on the system cmdstan_version <- try(cmdstanr::cmdstan_version(), silent = TRUE) found_cmdstan <- !is(cmdstan_version, "try-error") options( brms.parse_stancode = found_cmdstan && cmdstan_version >= "2.25" && not_cran, brms.backend = "cmdstanr" ) scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = prior(student_t(5,0,10), class = b) + prior(cauchy(0,2), class = sd), normalize = FALSE ) expect_match2(scode, "target += poisson_log_glm_lupmf(Y | Xc, mu, b);") expect_match2(scode, "target += student_t_lupdf(b | 5, 0, 10);") expect_match2(scode, "target += student_t_lupdf(Intercept | 3, 1.4, 2.5);") expect_match2(scode, "target += cauchy_lupdf(sd_1 | 0, 2);") expect_match2(scode, "target += std_normal_lupdf(z_1[1]);") scode <- make_stancode( count ~ zAge + zBase * Trt + (1|patient) + (1|obs), data = epilepsy, family = poisson(), prior = prior(student_t(5,0,10), class = b) + prior(cauchy(0,2), class = sd), normalize = FALSE, threads = threading(2) ) expect_match2(scode, "target += reduce_sum(partial_log_lik_lpmf, seq, grainsize, Y, Xc, b, Intercept, J_1, Z_1_1, r_1_1, J_2, Z_2_1, r_2_1);") expect_match2(scode, "ptarget += poisson_log_glm_lupmf(Y[start:end] | Xc[start:end], mu, b);") expect_match2(scode, "target += student_t_lupdf(b | 5, 0, 10);") expect_match2(scode, "target += student_t_lupdf(Intercept | 3, 1.4, 2.5);") expect_match2(scode, "target += cauchy_lupdf(sd_1 | 0, 2);") expect_match2(scode, "target += std_normal_lupdf(z_1[1]);") # Check that brms custom distributions stay normalized scode <- make_stancode( rating ~ period + carry + cs(treat), data = inhaler, family = sratio("cloglog"), normalize = FALSE ) expect_match2(scode, "target += sratio_cloglog_lpmf(Y[n] | mu[n], disc, Intercept - transpose(mucs[n]));") # Check that user-specified custom distributions stay normalized dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1[n]", "vreal1[n]"), ) stan_funs <- " real beta_binomial2_lpmf(int y, real mu, real phi, int N, real R) { return beta_binomial_lpmf(y | N, mu * phi, (1 - 
mu) * phi); } " stanvars <- stanvar(scode = stan_funs, block = "functions") scode <- make_stancode( y | vint(size) + vreal(size) ~ x, data = dat, family = beta_binomial2, prior = prior(gamma(0.1, 0.1), class = "tau"), stanvars = stanvars, normalize = FALSE, backend = "cmdstanr" ) expect_match2(scode, "target += beta_binomial2_lpmf(Y[n] | mu[n], tau, vint1[n], vreal1[n]);") expect_match2(scode, "gamma_lupdf(tau | 0.1, 0.1);") }) test_that("Normalizing Stan code works correctly", { expect_equal( normalize_stancode("// a\nb;\n b + c = 4; // kde\ndata"), normalize_stancode("// dasflkjldl\n // adsfadsfa\n b;\n\n \n \t\rb + c = 4;\ndata") ) expect_equal( normalize_stancode("data /* adfa */ {\nint a;\n /* asdddede \n asdfas \n asf */}\n"), normalize_stancode("data {\nint a;\n} /* aa \n adfasdf \n asdfadsf ddd */\n") ) expect_equal( normalize_stancode("data \n {\nint a;\n\n } \t\n"), normalize_stancode("data {\nint a;\n} \n") ) expect_equal( normalize_stancode("/* \n\n */\na*/"), normalize_stancode("a*/") ) expect_equal( normalize_stancode("//adsfadf \ra // asdfasdf\r\n"), normalize_stancode("a") ) expect_equal( normalize_stancode("/* * \n * \n * fg / */hhh"), normalize_stancode("hhh") ) expect_equal( normalize_stancode("a //b"), normalize_stancode("a") ) expect_false(normalize_stancode("// a\ndata {\nint a;\n}\n") == normalize_stancode("// a\ndata {\nint b;\n}\n")) # should not remove single whitespace expect_false(normalize_stancode("da ta") == normalize_stancode("data")) # should handle wrong nested comments expect_false(normalize_stancode("/* \n\n */\na*/") == normalize_stancode("b*/")) }) brms/tests/testthat/tests.posterior_predict.R0000644000175000017500000003270214111751670021404 0ustar nileshnileshcontext("Tests for posterior_predict helper functions") test_that("posterior_predict for location shift models runs without errors", { ns <- 30 nobs <- 10 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), sigma = rchisq(ns, 3), nu = rgamma(ns, 4) ) i <- sample(nobs, 1) pred <- brms:::posterior_predict_gaussian(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_student(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for various skewed models runs without errors", { ns <- 50 nobs <- 2 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), beta = rchisq(ns, 3), mu = matrix(rnorm(ns * nobs), ncol = nobs), alpha = rnorm(ns), ndt = 1 ) pred <- brms:::posterior_predict_lognormal(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_shifted_lognormal(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_exgaussian(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_skew_normal(1, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for aysm_laplace models runs without errors", { ns <- 50 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), quantile = rbeta(ns, 2, 1), mu = matrix(rnorm(ns*2), ncol = 2), zi = rbeta(ns, 10, 10) ) pred <- brms:::posterior_predict_asym_laplace(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_asym_laplace(1, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for multivariate linear models runs without errors", { ns <- 10 nvars <- 3 ncols <- 4 nobs <- nvars * ncols Sigma = 
array(cov(matrix(rnorm(300), ncol = 3)), dim = c(3, 3, 10)) prep <- structure(list(ndraws = ns), class = "mvbrmsprep") prep$mvpars <- list( Mu = array(rnorm(ns*nobs*nvars), dim = c(ns, nobs, nvars)), Sigma = aperm(Sigma, c(3, 1, 2)) ) prep$dpars <- list(nu = rgamma(ns, 5)) prep$data <- list(N = nobs, N_trait = ncols) pred <- brms:::posterior_predict_gaussian_mv(1, prep = prep) expect_equal(dim(pred), c(ns, nvars)) pred <- brms:::posterior_predict_student_mv(2, prep = prep) expect_equal(dim(pred), c(ns, nvars)) }) test_that("posterior_predict for ARMA covariance models runs without errors", { ns <- 20 nobs <- 15 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns*nobs), ncol = nobs), sigma = rchisq(ns, 3), nu = rgamma(ns, 5) ) prep$ac <- list( ar = matrix(rbeta(ns, 0.5, 0.5), ncol = 1), ma = matrix(rnorm(ns, 0.2, 1), ncol = 1), begin_tg = c(1, 5, 12), end_tg = c(4, 11, 15) ) prep$data <- list(se = rgamma(ns, 10)) prep$family$fun <- "gaussian_time" pred <- brms:::posterior_predict_gaussian_time(1, prep = prep) expect_equal(length(pred), ns * 4) prep$family$fun <- "student_time" pred <- brms:::posterior_predict_student_time(2, prep = prep) expect_equal(length(pred), ns * 7) }) test_that("loglik for SAR models runs without errors", { ns = 3 prep <- structure(list(ndraws = ns, nobs = 10), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(30), nrow = ns), nu = rep(2, ns), sigma = rep(10, ns) ) prep$ac <- list(lagsar = matrix(c(0.3, 0.5, 0.7)), Msar = diag(10)) pred <- brms:::posterior_predict_gaussian_lagsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) pred <- brms:::posterior_predict_student_lagsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) prep$ac$errorsar <- prep$ac$lagsar prep$ac$lagsar <- NULL pred <- brms:::posterior_predict_gaussian_errorsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) pred <- brms:::posterior_predict_student_errorsar(1, prep = prep) expect_equal(dim(pred), c(3, 10)) }) test_that("posterior_predict for FCOR models runs without errors", { ns <- 3 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(nobs * ns), nrow = ns), sigma = rep(1, ns), nu = rep(2, ns) ) prep$ac <- list(Mfcor = diag(nobs)) pred <- brms:::posterior_predict_gaussian_fcor(1, prep = prep) expect_equal(dim(pred), c(ns, nobs)) pred <- brms:::posterior_predict_student_fcor(1, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict for count and survival models runs without errors", { ns <- 25 nobs <- 10 trials <- sample(10:30, nobs, replace = TRUE) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns*nobs), ncol = nobs), shape = rgamma(ns, 4), xi = 0 ) prep$dpars$nu <- prep$dpars$sigma <- prep$dpars$shape + 1 prep$data <- list(trials = trials) i <- sample(nobs, 1) prep$dpars$mu <- brms:::inv_cloglog(prep$dpars$eta) pred <- brms:::posterior_predict_binomial(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_discrete_weibull(i, prep = prep) expect_equal(length(pred), ns) prep$dpars$mu <- exp(prep$dpars$eta) pred <- brms:::posterior_predict_poisson(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_negbinomial(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_negbinomial2(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_geometric(i, prep = prep) expect_equal(length(pred), ns) 
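# Hedged illustrative sketch (not the brms internal): these posterior_predict
# helpers all follow the same pattern, mapping an observation index 'i' to
# 'ndraws' simulated outcomes by plugging the i-th column of the 'mu' draws
# (plus any auxiliary parameters) into the matching RNG. A minimal,
# hypothetical stand-in for the Poisson case, checked like the helpers above:
pp_poisson_sketch <- function(i, prep) {
  rpois(prep$ndraws, lambda = prep$dpars$mu[, i])
}
expect_equal(length(pp_poisson_sketch(i, prep)), ns)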
pred <- brms:::posterior_predict_com_poisson(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_exponential(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_gamma(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_frechet(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_inverse.gaussian(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_gen_extreme_value(i, prep = prep) expect_equal(length(pred), ns) prep$family$link <- "log" pred <- brms:::posterior_predict_weibull(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for bernoulli and beta models works correctly", { ns <- 17 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = brms:::inv_logit(matrix(rnorm(ns * nobs * 2), ncol = 2 * nobs)), phi = rgamma(ns, 4) ) i <- sample(1:nobs, 1) pred <- brms:::posterior_predict_bernoulli(i, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_beta(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for circular models runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = 2 * atan(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)), kappa = rgamma(ns, 4) ) i <- sample(seq_len(nobs), 1) pred <- brms:::posterior_predict_von_mises(i, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for zero-inflated and hurdle models runs without erros", { ns <- 50 nobs <- 8 trials <- sample(10:30, nobs, replace = TRUE) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns * nobs * 2), ncol = nobs * 2), shape = rgamma(ns, 4), phi = rgamma(ns, 1), zi = rbeta(ns, 1, 1), coi = rbeta(ns, 5, 7) ) prep$dpars$hu <- prep$dpars$zoi <- prep$dpars$zi prep$data <- list(trials = trials) prep$dpars$mu <- exp(prep$dpars$eta) pred <- brms:::posterior_predict_hurdle_poisson(1, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_hurdle_negbinomial(2, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_hurdle_gamma(5, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_poisson(3, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_negbinomial(6, prep = prep) expect_equal(length(pred), ns) prep$dpars$mu <- brms:::inv_logit(prep$dpars$eta) pred <- brms:::posterior_predict_zero_inflated_binomial(4, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_inflated_beta(8, prep = prep) expect_equal(length(pred), ns) pred <- brms:::posterior_predict_zero_one_inflated_beta(7, prep = prep) expect_equal(length(pred), ns) }) test_that("posterior_predict for ordinal models runs without erros", { ns <- 50 nobs <- 8 nthres <- 3 ncat <- nthres + 1 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = array(rnorm(ns * nobs), dim = c(ns, nobs)), disc = rexp(ns) ) prep$thres$thres <- array(0, dim = c(ns, nthres)) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family$link <- "logit" prep$family$family <- "cumulative" pred <- sapply(1:nobs, brms:::posterior_predict_cumulative, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "sratio" pred <- sapply(1:nobs, brms:::posterior_predict_sratio, prep = 
prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "cratio" pred <- sapply(1:nobs, brms:::posterior_predict_cratio, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$family <- "acat" pred <- sapply(1:nobs, brms:::posterior_predict_acat, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$family$link <- "probit" pred <- sapply(1:nobs, brms:::posterior_predict_acat, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict for categorical and related models runs without erros", { set.seed(1234) ns <- 50 nobs <- 8 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu1 = array(rnorm(ns*nobs, 0, 0.1), dim = c(ns, nobs)), mu2 = array(rnorm(ns*nobs, 0, 0.1), dim = c(ns, nobs)) ) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family <- categorical() pred <- sapply(1:nobs, brms:::posterior_predict_categorical, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$data$trials <- sample(1:20, nobs) prep$family <- multinomial() pred <- brms:::posterior_predict_multinomial(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) prep$dpars$phi <- rexp(ns, 1) prep$family <- dirichlet() pred <- brms:::posterior_predict_dirichlet(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) expect_equal(rowSums(pred), rep(1, nrow(pred))) prep$family <- brmsfamily("dirichlet2") prep$dpars$mu1 <- rexp(ns, 10) prep$dpars$mu2 <- rexp(ns, 10) prep$dpars$mu3 <- rexp(ns, 10) pred <- brms:::posterior_predict_dirichlet2(i = sample(1:nobs, 1), prep = prep) expect_equal(dim(pred), c(ns, ncat)) expect_equal(rowSums(pred), rep(1, nrow(pred))) }) test_that("truncated posterior_predict run without errors", { ns <- 30 nobs <- 15 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), sigma = rchisq(ns, 3) ) prep$data <- list(lb = sample(-(4:7), nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_gaussian, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$dpars$mu <- exp(prep$dpars$mu) prep$data <- list(ub = sample(70:80, nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_poisson, prep = prep) expect_equal(dim(pred), c(ns, nobs)) prep$data <- list(lb = rep(0, nobs), ub = sample(70:75, nobs, TRUE)) pred <- sapply(1:nobs, brms:::posterior_predict_poisson, prep = prep) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_predict for the wiener diffusion model runs without errors", { skip("skip as long as RWiener fails on R-devel for 3.6.0") ns <- 5 nobs <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), bs = rchisq(ns, 3), ndt = rep(0.5, ns), bias = rbeta(ns, 1, 1) ) prep$data <- list(Y = abs(rnorm(ns)) + 0.5, dec = c(1, 0, 1)) i <- sample(1:nobs, 1) expect_equal(nrow(brms:::posterior_predict_wiener(i, prep)), ns) }) test_that("posterior_predict_custom runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rbeta(ns * nobs * 2, 1, 1), ncol = nobs * 2) ) prep$data <- list(trials = rep(1, nobs)) prep$family <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "trials[n]" ) posterior_predict_beta_binomial2 <- function(i, prep) { mu <- prep$dpars$mu[, i] rbinom(prep$ndraws, size = prep$data$trials[i], prob = mu) } 
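# The helper defined above is assumed to be picked up by name when
# brms:::posterior_predict_custom() dispatches for this custom family.
# As an additional sanity check, calling it directly should also return
# one simulated outcome per posterior draw:
expect_equal(length(posterior_predict_beta_binomial2(1, prep)), ns)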
expect_equal(length(brms:::posterior_predict_custom(sample(1:nobs, 1), prep)), ns) }) brms/tests/testthat/tests.stan_functions.R0000644000175000017500000002302413737534571020712 0ustar nileshnileshcontext("Tests for self-defined Stan functions") test_that("self-defined Stan functions work correctly", { # for some reason expose_stan_functions doesn't work within R CMD CHECK skip_if_not(exists("new_stan_functions", asNamespace("brms"))) rstan::expose_stan_functions(brms:::new_stan_functions) # ARMA matrix generating functions cov_ar1_R <- get_cov_matrix_ar1(ar = matrix(0.5), sigma = 2, se2 = 0, nrows = 3)[1, , ] expect_equal(cov_matrix_ar1(0.5, 2, 3), cov_ar1_R) cov_ma1_R <- matrix(get_cov_matrix_ma1(ma = matrix(-0.3), sigma = 3, se2 = 0, nrows = 1)[1, , ]) expect_equal(cov_matrix_ma1(-0.3, 3, 1), cov_ma1_R) cov_arma1_R <- get_cov_matrix_arma1(ar = matrix(-0.5), ma = matrix(0.7), sigma = 4, se2 = 0, nrows = 5)[1, , ] expect_equal(cov_matrix_arma1(-0.5, 0.7, 4, 5), cov_arma1_R) # log-likelihood functions for covariance models y <- rnorm(9) eta <- rnorm(9) ll_stan <- normal_cov_lpdf(y, eta = eta, se2 = 1:9, I = 2, begin = c(1, 5), end = c(4, 9), nobs = c(4, 5), res_cov_matrix = cov_arma1_R) ll_R <- c(dmulti_normal(y[1:4], eta[1:4], cov_arma1_R[1:4, 1:4] + diag(1:4)), dmulti_normal(y[5:9], eta[5:9], cov_arma1_R[1:5, 1:5] + diag(5:9))) expect_equal(ll_stan, sum(ll_R)) ll_stan <- student_t_cov_lpdf(y, nu = 10, eta = eta, se2 = 1:9, I = 2, begin = c(1, 5), end = c(4, 9), nobs = c(4, 5), res_cov_matrix = cov_arma1_R) ll_R <- c(dmulti_student(y[1:4], df = 10, mu = eta[1:4], Sigma = cov_arma1_R[1:4, 1:4] + diag(1:4)), dmulti_student(y[5:9], df = 10, mu = eta[5:9], Sigma = cov_arma1_R[1:5, 1:5] + diag(5:9))) expect_equal(ll_stan, sum(ll_R)) # inverse gaussian functions shape <- rgamma(1, 20, 1) mu <- 20 y <- statmod::rinvgauss(1, mean = mu, shape = shape) expect_equal(inv_gaussian_lpdf(y, mu, shape, log(y), sqrt(y)), dinvgauss(y, mean = mu, shape = shape, log = TRUE)) expect_equal(inv_gaussian_lcdf(y, mu, shape, log(y), sqrt(y)), pinvgauss(y, mean = mu, shape = shape, log = TRUE)) expect_equal(inv_gaussian_lccdf(y, mu, shape, log(y), sqrt(y)), log(1 - pinvgauss(y, mean = mu, shape = shape))) mu <- 18:22 y <- statmod::rinvgauss(5, mean = mu, shape = shape) expect_equal(inv_gaussian_vector_lpdf(y, mu, shape, sum(log(y)), sqrt(y)), sum(dinvgauss(y, mean = mu, shape = shape, log = TRUE))) # exgaussian functions beta <- rgamma(1, 1, 0.1) sigma <- rgamma(1, 10, 0.1) mu <- 10 y <- rexgaussian(1, mu = mu, sigma = sigma, beta = beta) expect_equal(exgaussian_lpdf(y, mu, sigma, beta), dexgaussian(y, mu, sigma, beta, log = TRUE)) expect_equal(exgaussian_lcdf(y, mu, sigma, beta), pexgaussian(y, mu, sigma, beta, log = TRUE)) expect_equal(exgaussian_lccdf(y, mu, sigma, beta), pexgaussian(y, mu, sigma, beta, lower.tail = FALSE, log = TRUE)) # asym_laplace functions mu <- 10 quantile <- rbeta(1, 2, 1) sigma <- rgamma(1, 10, 0.1) y <- rasym_laplace(1, mu = mu, sigma = sigma, quantile = quantile) expect_equal(asym_laplace_lpdf(y, mu, sigma, quantile), dasym_laplace(y, mu, sigma, quantile, log = TRUE)) expect_equal(asym_laplace_lcdf(y, mu, sigma, quantile), pasym_laplace(y, mu, sigma, quantile, log = TRUE)) expect_equal(asym_laplace_lccdf(y, mu, sigma, quantile), pasym_laplace(y, mu, sigma, quantile, lower.tail = FALSE, log = TRUE)) # wiener diffusion model functions alpha = 2 tau = 0.5 beta = 0.5 delta = 0.5 y <- rWiener(1, alpha, tau, beta, delta) y$resp <- ifelse(y$resp == "lower", 0, 1) 
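# The recoding to 0/1 above ensures that the exposed Stan density and the
# RWiener reference density below are evaluated on the same response coding.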
expect_equal(wiener_diffusion_lpdf(y$q, y$resp, alpha, tau, beta, delta), dWiener(y$q, alpha, tau, beta, delta, resp = y$resp, log = TRUE)) # zero-inflated and hurdle log-densities draws <- draws2 <- list(eta = matrix(rnorm(4), ncol = 4), shape = 2, phi = 2, sigma = 2) draws$data <- list(Y = c(0, 10), N_trait = 2, max_obs = 15) draws2$data <- list(Y = c(0, 0.5), N_trait = 2) for (i in seq_along(draws$data$Y)) { eta_zi_args <- list(y = draws$data$Y[i], eta = draws$eta[i], eta_zi = draws$eta[i+2]) zi_args <- list(y = draws$data$Y[i], eta = draws$eta[i], zi = inv_logit(eta_zi_args$eta_zi)) eta_hu_args <- list(y = draws$data$Y[i], eta = draws$eta[i], eta_hu = draws$eta[i+2]) hu_args <- list(y = draws$data$Y[i], eta = draws$eta[i], hu = inv_logit(eta_hu_args$eta_hu)) draws$f$link <- "log" expect_equal(do.call(zero_inflated_poisson_lpmf, zi_args), loglik_zero_inflated_poisson(i, draws)) expect_equal(do.call(zero_inflated_poisson_logit_lpmf, eta_zi_args), loglik_zero_inflated_poisson(i, draws)) expect_equal(do.call(zero_inflated_neg_binomial_lpmf, c(zi_args, shape = draws$shape)), loglik_zero_inflated_negbinomial(i, draws)) expect_equal(do.call(zero_inflated_neg_binomial_logit_lpmf, c(eta_zi_args, shape = draws$shape)), loglik_zero_inflated_negbinomial(i, draws)) expect_equal(do.call(hurdle_poisson_lpmf, hu_args), loglik_hurdle_poisson(i, draws)) expect_equal(do.call(hurdle_poisson_logit_lpmf, eta_hu_args), loglik_hurdle_poisson(i, draws)) expect_equal(do.call(hurdle_neg_binomial_lpmf, c(hu_args, shape = draws$shape)), loglik_hurdle_negbinomial(i, draws)) expect_equal(do.call(hurdle_neg_binomial_logit_lpmf, c(eta_hu_args, shape = draws$shape)), loglik_hurdle_negbinomial(i, draws)) expect_equal(do.call(hurdle_gamma_lpdf, c(hu_args, shape = draws$shape)), loglik_hurdle_gamma(i, draws)) expect_equal(do.call(hurdle_gamma_logit_lpdf, c(eta_hu_args, shape = draws$shape)), loglik_hurdle_gamma(i, draws)) draws$f$link <- "identity" expect_equal(do.call(hurdle_lognormal_lpdf, c(hu_args, sigma = draws$sigma)), loglik_hurdle_lognormal(i, draws)) expect_equal(do.call(hurdle_lognormal_logit_lpdf, c(eta_hu_args, sigma = draws$sigma)), loglik_hurdle_lognormal(i, draws)) draws$f$link <- "logit" expect_equal(do.call(zero_inflated_binomial_lpmf, c(zi_args, trials = draws$data$max_obs)), loglik_zero_inflated_binomial(i, draws)) expect_equal(do.call(zero_inflated_binomial_logit_lpmf, c(eta_zi_args, trials = draws$data$max_obs)), loglik_zero_inflated_binomial(i, draws)) # zero_inflated_beta requires Y to be in (0,1) draws2$f$link <- "logit" eta_zi_args <- list(y = draws2$data$Y[i], eta = draws$eta[i], eta_zi = draws$eta[i+2]) zi_args <- list(y = draws2$data$Y[i], eta = draws$eta[i], zi = inv_logit(eta_zi_args$eta_zi)) expect_equal(do.call(zero_inflated_beta_lpdf, c(zi_args, phi = draws$phi)), loglik_zero_inflated_beta(i, draws2)) expect_equal(do.call(zero_inflated_beta_logit_lpdf, c(eta_zi_args, phi = draws$phi)), loglik_zero_inflated_beta(i, draws2)) } # ordinal log-densities eta <- rnorm(1) etap <- array(rnorm(6), dim = c(2, 1, 3)) thres <- sort(rnorm(3)) # cumulative and sratio require thres - eta draws <- list(eta = rep(thres, each = 2) - array(eta, dim = c(2, 1, 3))) draws$data <- list(Y = 2, max_obs = 4) draws$f$link <- "probit" expect_equal(cumulative_lpmf(draws$data$Y, eta, thres), loglik_cumulative(1, draws)[1]) draws$f$link <- "logit" expect_equal(sratio_lpmf(draws$data$Y, eta, thres), loglik_sratio(1, draws)[1]) # acat and cratio require eta - thres # also category specific effects are included here 
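# (etap holds the draws of the category-specific effects, which is why the
# linear predictor below is built as eta + etap - thres, in contrast to the
# thres - eta construction used for cumulative and sratio above.)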
draws$eta <- eta + etap - rep(thres, each = 2) draws$f$link <- "cloglog" expect_equal(cratio_lpmf(draws$data$Y, eta, etap[1, , ], thres), loglik_cratio(1, draws)[1]) draws$f$link <- "cauchit" expect_equal(acat_lpmf(draws$data$Y, eta, etap[1, , ], thres), loglik_acat(1, draws)[1]) # kronecker product A <- matrix(c(3, 2, 1, 2, 4, 1, 1, 1, 5), nrow = 3) B <- matrix(c(3, 2, 2, 4), nrow = 2) sd <- c(2, 7) expect_equal(t(chol(base::kronecker(A, diag(sd) %*% B %*% diag(sd)))), kronecker(t(chol(A)), diag(sd) %*% t(chol(B)))) # as_matrix expect_equal(as_matrix(1:28, 4, 7), rbind(1:7, 8:14, 15:21, 22:28)) expect_equal(as_matrix(1:28, 3, 4), rbind(1:4, 5:8, 9:12)) # cauchit and cloglog link expect_equal(inv_cauchit(1.5), pcauchy(1.5)) expect_equal(cauchit(0.7), qcauchy(0.7)) expect_equal(cloglog(0.2), link(0.2, "cloglog")) # monotonic # slightly arkward way to call this function to make sure # is doesn't conflict with the brms R function of the same name monotonic_temp <- get("monotonic", globalenv()) expect_equal(monotonic_temp(1:10, 4), sum(1:4)) expect_equal(monotonic_temp(rnorm(5), 0), 0) }) brms/tests/testthat/tests.brm.R0000644000175000017500000001322514117576250016430 0ustar nileshnilesh# calling context() avoids a strange bug in testthat 2.0.0 # cannot actually run brms models in tests as it takes way too long context("Tests for brms error messages") test_that("brm works fully with mock backend", { skip_on_cran() dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) # Positive control - forced error gets thrown and propagated expect_error(brm(y ~ x + (1|g), dat, backend = "mock", stan_model_args = list(compile_error = "Test error")), "Test error") # Positive control - bad Stan code from stanvars gets an error expect_error(suppressMessages( brm(y ~ x + (1|g), dat, backend = "mock", stanvars = stanvar(scode = "invalid;", block = "model")) )) # Testing some models mock_fit <- brm(y ~ x + (1|g), dat, mock_fit = 1, backend = "mock", rename = FALSE) expect_equal(mock_fit$fit, 1) }) test_that("brm(file = xx) works fully with mock backend", { skip_on_cran() dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) file <- tempfile(fileext = ".rds") mock_fit1 <- brm(y ~ x + (1|g), dat, mock_fit = "stored", backend = "mock", rename = FALSE, file = file) expect_true(file.exists(file)) mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # In default settings, even using different data/model should result in the # model being loaded from file changed_data <- dat[1:8, ] mock_fit2 <- brm(y ~ x + 0, changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # Now test using file_refit = "on_change" which should be more clever # No change mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file) expect_equal(mock_fit2$fit, "stored") # Change data, but not code mock_fit2 <- brm(y ~ x + (1|g), changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change") expect_equal(mock_fit2$fit, "new") # Change code but not data mock_fit2 <- brm(y ~ x + (1|g), dat, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change", prior = prior(normal(0,2), class = sd)) expect_equal(mock_fit2$fit, "new") # Change both mock_fit2 <- brm(y ~ x + 0, changed_data, mock_fit = "new", backend = "mock", rename = FALSE, file = file, file_refit = "on_change") 
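# With both the model formula and the data changed, file_refit = "on_change"
# should detect the mismatch with the cached fit and refit, so the new mock
# fit is returned rather than the stored one.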
expect_equal(mock_fit2$fit, "new") }) test_that("brm produces expected errors", { dat <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:5, 2)) # formula parsing expect_error(brm(~ x + (1|g), dat, file = "test"), "Response variable is missing") expect_error(brm(bf(y ~ a, nl = TRUE)), "No non-linear parameters specified") expect_error(brm(bf(y | se(sei) ~ x, sigma ~ x), dat), "Cannot predict or fix 'sigma' in this model") expect_error(brm(y | se(sei) ~ x, dat, family = weibull()), "Argument 'se' is not supported for family") expect_error(brm(y | se(sei) + se(sei2) ~ x, dat, family = gaussian()), "Each addition argument may only be defined once") expect_error(brm(y | abc(sei) ~ x, family = gaussian()), "The following addition terms are invalid:\n'abc(sei)'", fixed = TRUE) expect_error(brm(y | disp(sei) ~ x, dat, family = gaussian()), "The following addition terms are invalid:") expect_error(brm(bf(y ~ x, shape ~ x), family = gaussian()), "The parameter 'shape' is not a valid distributional") expect_error(brm(y ~ x + (1|abc|g/x), dat), "Can only combine group-level terms") expect_error(brm(y ~ x + (1|g) + (x|g), dat), "Duplicated group-level effects are not allowed") expect_error(brm(y~mo(g)*t2(x), dat), fixed = TRUE, "The term 'mo(g):t2(x)' is invalid") expect_error(brm(y~x*cs(g), dat), fixed = TRUE, "The term 'x:cs(g)' is invalid") expect_error(brm(y~me(x, 2 * g)*me(x, g), dat), "Variable 'x' is used in different calls to 'me'") expect_error(brm(y ~ 1 + set_rescor(TRUE), data = dat), "Function 'set_rescor' should not be part") # autocorrelation expect_error(brm(y ~ ar(x+y, g), dat), "Cannot coerce 'x \\+ y' to a single variable name") expect_error(brm(y ~ ar(gr = g1/g2), dat), "Illegal grouping term 'g1/g2'") expect_error(brm(y ~ ma(x), dat, poisson()), "Please set cov = TRUE") expect_error(brm(bf(y ~ 1) + arma(x), dat), "Autocorrelation terms can only be specified") # ordinal models expect_error(brm(rating ~ treat + (cs(period)|subject), data = inhaler, family = categorical()), "Category specific effects are not supported") # families and links expect_error(brm(y ~ x, dat, family = gaussian("logit")), "'logit' is not a supported link for family 'gaussian'") expect_error(brm(y ~ x, dat, family = poisson("inverse")), "'inverse' is not a supported link for family 'poisson'") expect_error(brm(y ~ x, dat, family = c("weibull", "sqrt")), "'sqrt' is not a supported link for family 'weibull'") expect_error(brm(y ~ x, dat, family = c("categorical", "probit")), "'probit' is not a supported link for family 'categorical'") expect_error(brm(y ~ x, dat, family = "ordinal"), "ordinal is not a supported family") }) brms/tests/testthat/tests.brmsformula.R0000644000175000017500000000376414111751667020211 0ustar nileshnileshcontext("Tests for brmsformula") test_that("brmsformula validates formulas of non-linear parameters", { expect_error(bf(y ~ a, ~ 1, a ~ 1), "Additional formulas must be named") expect_error(bf(y ~ a^x, a.b ~ 1), "not contain dots or underscores") expect_error(bf(y ~ a^(x+b), a_b ~ 1), "not contain dots or underscores") }) test_that("brmsformula validates formulas of auxiliary parameters", { expect_error(bf(y ~ a, ~ 1, sigma ~ 1), "Additional formulas must be named") }) test_that("brmsformula detects use if '~~'", { # checks fix of issue #749 expect_error(bf(y~~x), "~~") }) test_that("brmsformula does not change a 'brmsformula' object", { form <- bf(y ~ a, sigma ~ 1) expect_identical(form, bf(form)) form <- bf(y ~ a, sigma ~ 1, a ~ x, nl = TRUE) expect_identical(form, bf(form)) }) 
test_that("brmsformula detects auxiliary parameter equations", { expect_error(bf(y~x, sigma1 = "sigmaa2"), "Can only equate parameters of the same class") expect_error(bf(y~x, mu3 = "mu2"), "Equating parameters of class 'mu' is not allowed") expect_error(bf(y~x, sigma1 = "sigma1"), "Equating 'sigma1' with itself is not meaningful") expect_error(bf(y~x, shape1 ~ x, shape2 = "shape1"), "Cannot use predicted parameters on the right-hand side") expect_error(bf(y~x, shape1 = "shape3", shape2 = "shape1"), "Cannot use fixed parameters on the right-hand side") }) test_that("update_adterms works correctly", { form <- y | trials(size) ~ x expect_equal( update_adterms(form, ~ trials(10)), y | trials(10) ~ x ) expect_equal( update_adterms(form, ~ weights(w)), y | trials(size) + weights(w) ~ x ) expect_equal( update_adterms(form, ~ weights(w), action = "replace"), y | weights(w) ~ x ) expect_equal( update_adterms(y ~ x, ~ trials(10)), y | trials(10) ~ x ) }) brms/tests/testthat/tests.posterior_epred.R0000644000175000017500000001771114111751670021054 0ustar nileshnileshcontext("Tests for posterior_epred helper functions") # to reduce testing time on CRAN skip_on_cran() test_that("posterior_epred helper functions run without errors", { # actually run posterior_epred.brmsfit that call the helper functions fit <- brms:::rename_pars(brms:::brmsfit_example1) add_dummy_draws <- brms:::add_dummy_draws fit <- add_dummy_draws(fit, "shape", dist = "exp") fit <- add_dummy_draws(fit, "alpha", dist = "norm") fit <- add_dummy_draws(fit, "hu", dist = "beta", shape1 = 1, shape2 = 1) fit <- add_dummy_draws(fit, "zi", dist = "beta", shape1 = 1, shape2 = 1) fit <- add_dummy_draws(fit, "quantile", dist = "beta", shape1 = 2, shape2 = 1) fit <- add_dummy_draws(fit, "xi", dist = "unif", min = -1, max = 0.5) fit <- add_dummy_draws(fit, "ndt", dist = "exp") fit$formula$formula <- update(fit$formula$formula, .~. 
- arma(visit, patient)) prep <- brms:::prepare_predictions(fit) prep$dpars$mu <- brms:::get_dpar(prep, "mu") prep$dpars$sigma <- brms:::get_dpar(prep, "sigma") prep$dpars$nu <- brms:::get_dpar(prep, "nu") ndraws <- ndraws(fit) nobs <- nobs(fit) # test preparation of truncated models prep$data$lb <- 0 prep$data$ub <- 200 mu <- brms:::posterior_epred_trunc(prep) expect_equal(dim(mu), c(ndraws, nobs)) # pseudo log-normal model fit$family <- fit$formula$family <- lognormal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo shifted log-normal model fit$family <- fit$formula$family <- shifted_lognormal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo skew-normal model fit$family <- fit$formula$family <- skew_normal() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo asym_laplace model fit$family <- fit$formula$family <- asym_laplace() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo zero_inflated_asym_laplace model fit$family <- fit$formula$family <- brmsfamily("zero_inflated_asym_laplace") expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo gen_extreme_value model fit$family <- fit$formula$family <- gen_extreme_value() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo weibull model fit$formula$pforms <- NULL fit$family <- fit$formula$family <- weibull() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo binomial model fit$autocor <- brms:::cor_empty() fit$family <- fit$formula$family <- binomial() expect_equal(dim(SW(posterior_epred(fit, summary = FALSE))), c(ndraws, nobs)) # pseudo hurdle poisson model fit$family <- fit$formula$family <- hurdle_poisson() fit$formula <- bf(count ~ Trt*Age + mo(Exp) + offset(Age) + (1+Trt|visit), family = family(fit)) expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo zero-inflated poisson model fit$family <- fit$formula$family <- zero_inflated_poisson() expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # pseudo custom model posterior_epred_test <- function(prep) { prep$dpars$mu } fit$family <- fit$formula$family <- custom_family( "test", dpars = "mu", links = c("logit"), type = "int", vars = "trials[n]" ) expect_equal(dim(posterior_epred(fit, summary = FALSE)), c(ndraws, nobs)) # truncated continuous models prep$dpars$shape <- c(as.matrix(fit, variable = "shape")) mu <- brms:::posterior_epred_trunc_gaussian(prep, lb = 0, ub = 10) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_student(prep, lb = -Inf, ub = 15) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_lognormal(prep, lb = 2, ub = 15) expect_equal(dim(mu), c(ndraws, nobs)) prep$dpars$mu <- exp(prep$dpars$mu) mu <- brms:::posterior_epred_trunc_gamma(prep, lb = 1, ub = 7) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_exponential(prep, lb = 0, ub = Inf) expect_equal(dim(mu), c(ndraws, nobs)) mu <- SW(brms:::posterior_epred_trunc_weibull(prep, lb = -Inf, ub = Inf)) expect_equal(dim(mu), c(ndraws, nobs)) # truncated discrete models data <- list(Y = sample(100, 10), trials = 1:10, N = 10) lb <- matrix(0, nrow = ndraws, ncol = nobs) ub <- matrix(100, nrow = ndraws, ncol = nobs) mu <- brms:::posterior_epred_trunc_poisson(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_negbinomial(prep, lb = lb, ub = 
ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_negbinomial2(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) mu <- brms:::posterior_epred_trunc_geometric(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) prep$data$trials <- 120 lb <- matrix(-Inf, nrow = ndraws, ncol = nobs) prep$dpars$mu <- brms:::ilink(prep$dpars$mu, "logit") mu <- brms:::posterior_epred_trunc_binomial(prep, lb = lb, ub = ub) expect_equal(dim(mu), c(ndraws, nobs)) }) test_that("posterior_epred_lagsar runs without errors", { prep <- list( dpars = list(mu = matrix(rnorm(30), nrow = 3)), ac = list( lagsar = matrix(c(0.3, 0.5, 0.7)), Msar = matrix(1:100, 10, 10) ), ndraws = 3, nobs = 10, family = gaussian() ) mu_new <- brms:::posterior_epred_lagsar(prep) expect_equal(dim(mu_new), dim(prep$dpars$mu)) expect_true(!identical(mu_new, prep$dpars$mu)) }) test_that("posterior_epred for advanced count data distributions runs without errors", { ns <- 15 nobs <- 5 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = array(rbeta(ns*nobs, 2, 2), dim = c(ns, nobs)), shape = array(rexp(ns*nobs, 3), dim = c(ns, nobs)) ) prep$family <- brmsfamily("discrete_weibull") pred <- suppressWarnings(brms:::posterior_epred_discrete_weibull(prep)) expect_equal(dim(pred), c(ns, nobs)) prep$family <- brmsfamily("com_poisson") pred <- suppressWarnings(brms:::posterior_epred_com_poisson(prep)) expect_equal(dim(pred), c(ns, nobs)) }) test_that("posterior_epred for multinomial and dirichlet models runs without errors", { ns <- 15 nobs <- 8 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu1 = array(rnorm(ns*nobs), dim = c(ns, nobs)), mu2 = array(rnorm(ns*nobs), dim = c(ns, nobs)) ) prep$data <- list(ncat = ncat, trials = sample(1:20, nobs)) prep$family <- multinomial() pred <- brms:::posterior_epred_multinomial(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) prep$family <- dirichlet() pred <- brms:::posterior_epred_dirichlet(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) prep$family <- brmsfamily("dirichlet2") prep$dpars$mu1 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) prep$dpars$mu2 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) prep$dpars$mu3 <- array(rexp(ns*nobs, 1), dim = c(ns, nobs)) pred <- brms:::posterior_epred_dirichlet2(prep = prep) expect_equal(dim(pred), c(ns, nobs, ncat)) }) test_that("posterior_epred() can be reproduced by using d()", { fit4 <- rename_pars(brms:::brmsfit_example4) epred4 <- posterior_epred(fit4) eta4 <- posterior_linpred(fit4) bprep4 <- prepare_predictions(fit4) thres4 <- bprep4$thres$thres disc4 <- bprep4$dpars$disc$fe$b %*% t(bprep4$dpars$disc$fe$X) disc4 <- exp(disc4) epred4_ch <- aperm(sapply(seq_len(dim(eta4)[2]), function(i) { dsratio(seq_len(ncol(thres4) + 1), eta4[, i, ], thres4, disc4[, i]) }, simplify = "array"), perm = c(1, 3, 2)) expect_equivalent(epred4, epred4_ch) }) brms/tests/testthat/tests.brmsterms.R0000644000175000017500000001005414111751667017664 0ustar nileshnileshcontext("Tests for formula parsing functions") test_that("brmsterms finds all variables in very long formulas", { expect_equal( all.vars(brmsterms(t2_brand_recall ~ psi_expsi + psi_api_probsolv + psi_api_ident + psi_api_intere + psi_api_groupint)$all), all.vars(t2_brand_recall ~ t2_brand_recall + psi_expsi + psi_api_probsolv + psi_api_ident + psi_api_intere + psi_api_groupint) ) }) test_that("brmsterms handles very long RE terms", { # tests issue #100 covariate_vector <- 
paste0("xxxxx", 1:80, collapse = "+") formula <- paste(sprintf("y ~ 0 + trait + trait:(%s)", covariate_vector), sprintf("(1+%s|id)", covariate_vector), sep = " + ") bterms <- brmsterms(as.formula(formula)) expect_equal(bterms$dpars$mu$re$group, "id") }) test_that("brmsterms correctly handles auxiliary parameter 'mu'", { bterms1 <- brmsterms(y ~ x + (x|g)) bterms2 <- brmsterms(bf(y ~ 1, mu ~ x + (x|g))) expect_equal(bterms1$dpars$mu, bterms2$dpars$mu) # commented out for now as updating is not yet enabled # bterms1 <- brmsterms(bf(y ~ z + x + (x|g))) # bterms2 <- brmsterms(bf(y ~ z, lf(mu ~ x + (x|g)))) # expect_equal(bterms1$dpars$mu, bterms2$dpars$mu) # # bterms1 <- brmsterms(bf(y ~ z, lf(mu ~ x + (x|g), cmc = FALSE))) # expect_true(!attr(bterms1$dpars$mu$fe, "cmc")) # # expect_error(brmsterms(bf(y ~ z, mu ~ x + (x|g), nl = TRUE)), # "Cannot combine non-linear formulas") }) test_that("brmsterms correctly check fixed auxiliary parameters", { bform <- bf(y~1, sigma = 4, family = gaussian) expect_true(is.brmsterms(brmsterms(bform))) bform <- bf(y~1, zi = 0.5, family = zero_inflated_beta()) expect_true(is.brmsterms(brmsterms(bform))) bform <- bf(y~1, shape = -2, family = Gamma()) expect_error(brmsterms(bform), "Parameter 'shape' must be positive") bform <- bf(y~1, quantile = 1.5, family = asym_laplace()) expect_error(brmsterms(bform), "Parameter 'quantile' must be between 0 and 1") }) test_that("check_re_formula returns correct REs", { old_form <- y ~ x + (1|patient) + (Trt_c|visit) form <- check_re_formula(~ (1 | visit), old_form) expect_equivalent(form, ~ (1 | gr(visit))) form <- check_re_formula(~ (1 + Trt_c|visit), old_form) expect_equivalent(form, ~ (1 + Trt_c | gr(visit))) form <- check_re_formula(~ (0 + Trt_c | visit) + (1|patient), old_form) expect_equivalent(form, ~ (1|gr(patient)) + (0 + Trt_c | gr(visit))) # checks for fix of issue #844 old_form <- y ~ 0 + x1 + x2 + (0 + x1 + x2 | x3) expect_error( check_re_formula(~ (0 + x2 + x1 | x3), old_form), "Order of terms in 're_formula' should match the original order" ) }) test_that("update_re_terms works correctly", { expect_equivalent(update_re_terms(y ~ x, ~ (1|visit)), y ~ x) expect_equivalent(update_re_terms(y ~ x*z + (1+Trt_c|patient), ~ (1|patient)), y ~ x*z + (1|gr(patient))) expect_equivalent(update_re_terms(y ~ x + (1|patient), ~ 1), y ~ x) expect_equivalent(update_re_terms(y ~ 1|patient, ~ 1), y ~ 1) expect_equivalent(update_re_terms(y ~ -1 + x + (1+visit|patient), NA), y ~ -1 + x) expect_equivalent(update_re_terms(y ~ x + (1+visit|patient), NULL), y ~ x + (1+visit|patient)) expect_equivalent(update_re_terms(y ~ (1|patient), NA), y ~ 1) expect_equivalent(update_re_terms(y ~ x + (1+x|visit), ~ (1|visit)), y ~ x + (1|gr(visit))) expect_equivalent(update_re_terms(y ~ x + (1|visit), ~ (1|visit) + (x|visit)), y ~ x + (1|gr(visit))) expect_equal(update_re_terms(bf(y ~ x, sigma = ~ x + (x|g)), ~ (1|g)), bf(y ~ x, sigma = ~ x + (1|gr(g)))) expect_equal(update_re_terms(bf(y ~ x, x ~ z + (1|g), nl = TRUE), ~ (1|g)), bf(y ~ x, x ~ z + (1|gr(g)), nl = TRUE)) }) test_that("unused variables are correctly incorporated", { bterms <- brmsterms(bf(y ~ 1, unused = ~ x)) expect_true("x" %in% all.vars(bterms$allvars)) }) brms/tests/testthat/tests.make_standata.R0000644000175000017500000012563214122061106020433 0ustar nileshnileshcontext("Tests for make_standata") test_that(paste("make_standata returns correct data names ", "for fixed and random effects"), { expect_equal(names(make_standata(rating ~ treat + period + carry + (1|subject), data = 
inhaler)), c("N", "Y", "K", "X", "Z_1_1", "J_1", "N_1", "M_1", "NC_1", "prior_only")) expect_equal(names(make_standata(rating ~ treat + period + carry + (1+treat|id|subject), data = inhaler, family = "categorical")), c("N", "Y", "ncat", "K_mu2", "X_mu2", "Z_1_mu2_1", "Z_1_mu2_2", "K_mu3", "X_mu3", "Z_1_mu3_3", "Z_1_mu3_4", "K_mu4", "X_mu4", "Z_1_mu4_5", "Z_1_mu4_6", "J_1", "N_1", "M_1", "NC_1", "prior_only")) expect_equal(names(make_standata(rating ~ treat + period + carry + (1+treat|subject), data = inhaler)), c("N", "Y", "K", "X", "Z_1_1", "Z_1_2", "J_1", "N_1", "M_1", "NC_1", "prior_only")) dat <- data.frame(y = 1:10, g = 1:10, h = 11:10, x = rep(0,10)) expect_equal(names(make_standata(y ~ x + (1|g) + (1|h), dat, "poisson")), c("N", "Y", "K", "X", "Z_1_1", "Z_2_1", "J_1", "J_2", "N_1", "M_1", "NC_1", "N_2", "M_2", "NC_2", "prior_only")) expect_true(all(c("Z_1_1", "Z_1_2", "Z_2_1", "Z_2_2") %in% names(make_standata(y ~ x + (1+x|g/h), dat)))) expect_equal(make_standata(y ~ x + (1+x|g+h), dat), make_standata(y ~ x + (1+x|g) + (1+x|h), dat)) }) test_that(paste("make_standata handles variables used as fixed effects", "and grouping factors at the same time"), { data <- data.frame(y = 1:9, x = factor(rep(c("a","b","c"), 3))) standata <- make_standata(y ~ x + (1|x), data = data) expect_equal(colnames(standata$X), c("Intercept", "xb", "xc")) expect_equal(standata$J_1, as.array(rep(1:3, 3))) standata2 <- make_standata(y ~ x + (1|x), data = data, control = list(not4stan = TRUE)) expect_equal(colnames(standata2$X), c("Intercept", "xb", "xc")) }) test_that("make_standata returns correct data names for addition terms", { dat <- data.frame(y = 1:10, w = 1:10, t = 1:10, x = rep(0,10), c = sample(-1:1,10,TRUE)) expect_equal(names(make_standata(y | se(w) ~ x, dat, gaussian())), c("N", "Y", "se", "K", "X", "sigma", "prior_only")) expect_equal(names(make_standata(y | weights(w) ~ x, dat, "gaussian")), c("N", "Y", "weights", "K", "X", "prior_only")) expect_equal(names(make_standata(y | cens(c) ~ x, dat, "student")), c("N", "Y", "cens", "K", "X", "prior_only")) expect_equal(names(make_standata(y | trials(t) ~ x, dat, "binomial")), c("N", "Y", "trials", "K", "X", "prior_only")) expect_equal(names(make_standata(y | trials(10) ~ x, dat, "binomial")), c("N", "Y", "trials", "K", "X", "prior_only")) expect_equal(names(make_standata(y | thres(11) ~ x, dat, "acat")), c("N", "Y", "nthres", "K", "X", "disc", "prior_only")) expect_equal(names(make_standata(y | thres(10) ~ x, dat, cumulative())), c("N", "Y", "nthres", "K", "X", "disc", "prior_only")) sdata <- make_standata(y | trunc(0,20) ~ x, dat, "gaussian") expect_true(all(sdata$lb == 0) && all(sdata$ub == 20)) sdata <- make_standata(y | trunc(ub = 21:30) ~ x, dat) expect_true(all(all(sdata$ub == 21:30))) }) test_that(paste("make_standata accepts correct response variables", "depending on the family"), { expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(-9.9,0,0.1)), family = "student")$Y, as.array(seq(-9.9,0,0.1))) expect_equal(make_standata(y | trials(10) ~ 1, data = data.frame(y = 1:10), family = "binomial")$Y, as.array(1:10)) expect_equal(make_standata(y ~ 1, data = data.frame(y = 10:20), family = "poisson")$Y, as.array(10:20)) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-c(1:2),5)), family = "bernoulli")$Y, as.array(rep(1:0,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(c(TRUE, FALSE),5)), family = "bernoulli")$Y, as.array(rep(1:0,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)), family = 
"categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(11:20,5)), family = "categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = factor(rep(11:20,5))), family = "categorical")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)), family = "cumulative")$Y, as.array(rep(1:10,5))) dat <- data.frame(y = factor(rep(-4:5,5), order = TRUE)) expect_equal(make_standata(y ~ 1, data = dat, family = "acat")$Y, as.array(rep(1:10,5))) expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(1,10,0.1)), family = "exponential")$Y, as.array(seq(1,10,0.1))) dat <- data.frame(y1 = 1:10, y2 = 11:20, x = rep(0,10)) form <- bf(mvbind(y1, y2) ~ x) + set_rescor(TRUE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1, as.array(1:10)) expect_equal(sdata$Y_y2, as.array(11:20)) }) test_that(paste("make_standata rejects incorrect response variables", "depending on the family"), { expect_error(make_standata(y ~ 1, data = data.frame(y = factor(1:10)), family = "student"), "Family 'student' requires numeric responses") expect_error(make_standata(y ~ 1, data = data.frame(y = -5:5), family = "geometric"), "Family 'geometric' requires response greater than or equal to 0") expect_error(make_standata(y ~ 1, data = data.frame(y = -1:1), family = "bernoulli"), "contain only two different values") expect_error(make_standata(y ~ 1, data = data.frame(y = factor(-1:1)), family = "cratio"), "Family 'cratio' requires either positive integers or ordered factors") expect_error(make_standata(y ~ 1, data = data.frame(y = rep(0.5:7.5), 2), family = "sratio"), "Family 'sratio' requires either positive integers or ordered factors") expect_error(make_standata(y ~ 1, data = data.frame(y = rep(-7.5:7.5), 2), family = "gamma"), "Family 'gamma' requires response greater than 0") expect_error(make_standata(y ~ 1, data = data.frame(y = c(0.1, 0.5, 1)), family = Beta()), "Family 'beta' requires response smaller than 1") expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 4)), family = von_mises()), "Family 'von_mises' requires response smaller than or equal to 3.14") expect_error(make_standata(y ~ 1, data = data.frame(y = c(-1, 2, 5)), family = hurdle_gamma()), "Family 'hurdle_gamma' requires response greater than or equal to 0") }) test_that("make_standata suggests using family bernoulli if appropriate", { expect_message(make_standata(y | trials(1) ~ 1, data = list(y = rep(0:1,5)), family = "binomial"), "family 'bernoulli' might be a more efficient choice.") expect_message(make_standata(y ~ 1, data = data.frame(y = rep(1:2, 5)), family = "acat"), "family 'bernoulli' might be a more efficient choice.") expect_message(make_standata(y ~ 1, data = data.frame(y = rep(0:1,5)), family = "categorical"), "family 'bernoulli' might be a more efficient choice.") }) test_that("make_standata returns correct values for addition terms", { dat <- data.frame(y = rnorm(9), s = 1:9, w = 1:9, c1 = rep(-1:1, 3), c2 = rep(c("left","none","right"), 3), c3 = c(rep(c(TRUE, FALSE), 4), FALSE), c4 = c(sample(-1:1, 5, TRUE), rep(2, 4)), t = 11:19) expect_equivalent(make_standata(y | se(s) ~ 1, data = dat)$se, as.array(1:9)) expect_equal(make_standata(y | weights(w) ~ 1, data = dat)$weights, as.array(1:9)) expect_equal(make_standata(y | cens(c1) ~ 1, data = dat)$cens, as.array(rep(-1:1, 3))) expect_equal(make_standata(y | cens(c2) ~ 1, data = dat)$cens, as.array(rep(-1:1, 3))) expect_equal(make_standata(y | cens(c3) 
~ 1, data = dat)$cens, as.array(c(rep(1:0, 4), 0))) expect_equal(make_standata(y | cens(c4, y + 2) ~ 1, data = dat)$rcens, as.array(c(rep(0, 5), dat$y[6:9] + 2))) sdata <- suppressWarnings(make_standata(s ~ 1, dat, family = "binomial")) expect_equal(sdata$trials, as.array(rep(9, 9))) expect_equal(make_standata(s | trials(10) ~ 1, dat, family = "binomial")$trials, as.array(rep(10, 9))) expect_equal(make_standata(s | trials(t) ~ 1, data = dat, family = "binomial")$trials, as.array(11:19)) expect_equal(SW(make_standata(s | cat(19) ~ 1, data = dat, family = "cumulative"))$nthres, 18) }) test_that("make_standata rejects incorrect addition terms", { dat <- data.frame(y = rnorm(9), s = -(1:9), w = -(1:9), c = rep(-2:0, 3), t = 9:1, z = 1:9) expect_error(make_standata(y | se(s) ~ 1, data = dat), "Standard errors must be non-negative") expect_error(make_standata(y | weights(w) ~ 1, data = dat), "Weights must be non-negative") expect_error(make_standata(y | cens(c) ~ 1, data = dat)) expect_error(make_standata(z | trials(t) ~ 1, data = dat, family = "binomial"), "Number of trials is smaller than the number of events") }) test_that("make_standata handles multivariate models", { dat <- data.frame( y1 = 1:10, y2 = 11:20, x = rep(0, 10), g = rep(1:2, 5), censi = sample(0:1, 10, TRUE), tim = 10:1, w = 1:10 ) form <- bf(mvbind(y1, y2) | weights(w) ~ x) + set_rescor(TRUE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1, as.array(dat$y1)) expect_equal(sdata$Y_y2, as.array(dat$y2)) expect_equal(sdata$weights_y1, as.array(1:10)) expect_error(make_standata(bf(mvbind(y1, y2, y2) ~ x) + set_rescor(FALSE), data = dat), "Cannot use the same response variable twice") form <- bf(mvbind(y1 / y2, y2, y1 * 3) ~ x) + set_rescor(FALSE) sdata <- make_standata(form, data = dat) expect_equal(sdata$Y_y1y2, as.array(dat$y1 / dat$y2)) sdata <- suppressWarnings( make_standata(mvbind(y1, y2) ~ x, dat, autocor = cor_ar(~ tim | g)) ) target1 <- c(seq(9, 1, -2), seq(10, 2, -2)) expect_equal(sdata$Y_y1, as.array(target1)) target2 <- c(seq(19, 11, -2), seq(20, 12, -2)) expect_equal(sdata$Y_y2, as.array(target2)) # models without residual correlations expect_warning( bform <- bf(y1 | cens(censi) ~ x + y2 + (x|2|g)) + gaussian() + cor_ar() + (bf(x ~ 1) + mixture(poisson, nmix = 2)) + (bf(y2 ~ s(y2) + (1|2|g)) + skew_normal()), "Using 'cor_brms' objects for 'autocor' is deprecated" ) bprior <- prior(normal(0, 5), resp = y1) + prior(normal(0, 10), resp = y2) + prior(dirichlet(2, 1), theta, resp = x) sdata <- make_standata(bform, dat, prior = bprior) sdata_names <- c( "N", "J_1_y1", "cens_y1", "Kma_y1", "Z_1_y2_3", "Zs_y2_1_1", "Y_y2", "con_theta_x", "X_mu2_x" ) expect_true(all(sdata_names %in% names(sdata))) expect_equal(sdata$con_theta_x, as.array(c(2, 1))) }) test_that("make_standata removes NAs correctly", { dat <- data.frame(y = c(rnorm(9), NA)) sdata <- suppressWarnings(make_standata(y ~ 1, dat)) expect_equal(as.numeric(sdata$Y), dat$y[1:9]) }) test_that("make_standata handles the 'subset' addition argument correctly", { dat1 <- data.frame( y1 = rnorm(15), y2 = NA, x1 = rnorm(15), x2 = NA, x3 = rnorm(15), sub1 = 1, sub2 = 0 ) dat2 <- data.frame( y1 = NA, y2 = rnorm(10), x1 = NA, x2 = rnorm(10), x3 = NA, sub1 = 0, sub2 = 1 ) dat <- rbind(dat1, dat2) bform <- bf(y1 | subset(sub1) ~ x1*x3 + sin(x1), family = gaussian()) + bf(y2 | subset(sub2) ~ x2, family = gaussian()) + set_rescor(FALSE) sdata <- make_standata(bform, dat) nsub1 <- sum(dat$sub1) nsub2 <- sum(dat$sub2) expect_equal(sdata$N_y1, nsub1)
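# The subset() addition term restricts each response of a multivariate model
# to the rows whose indicator is 1/TRUE, so the per-response sample sizes
# N_y1 and N_y2 should equal the number of flagged rows (15 and 10 here).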
expect_equal(sdata$N_y2, nsub2) expect_equal(length(sdata$Y_y1), nsub1) expect_equal(nrow(sdata$X_y2), nsub2) }) test_that("make_standata returns correct data for ARMA terms", { dat <- data.frame(y = 1:10, x = rep(0, 10), tim = 10:1, g = rep(3:4, 5)) sdata <- make_standata(y ~ x + ma(tim, g), data = dat) expect_equal(sdata$J_lag, as.array(c(1, 1, 1, 1, 0, 1, 1, 1, 1, 0))) sdata <- make_standata(y ~ x + ar(tim, g, p = 2), data = dat) expect_equal(sdata$J_lag, as.array(c(1, 2, 2, 2, 0, 1, 2, 2, 2, 0))) sdata <- make_standata(y ~ x + ar(tim, g, cov = TRUE), data = dat) expect_equal(sdata$begin_tg, as.array(c(1, 6))) expect_equal(sdata$nobs_tg, as.array(c(5, 5))) bform <- bf(y ~ exp(b * x), b ~ 1, nl = TRUE, autocor = ~arma()) sdata <- make_standata(bform, dat) }) test_that("make_standata allows to retrieve the initial data order", { dat <- data.frame(y1 = rnorm(100), y2 = rnorm(100), id = sample(1:10, 100, TRUE), time = sample(1:100, 100)) # univariate model sdata1 <- make_standata(y1 ~ ar(time, id), data = dat, internal = TRUE) expect_equal(dat$y1, as.numeric(sdata1$Y[attr(sdata1, "old_order")])) # multivariate model form <- bf(mvbind(y1, y2) ~ ma(time, id)) + set_rescor(FALSE) sdata2 <- make_standata(form, data = dat, internal = TRUE) expect_equal(sdata2$Y_y1[attr(sdata2, "old_order")], as.array(dat$y1)) expect_equal(sdata2$Y_y2[attr(sdata2, "old_order")], as.array(dat$y2)) }) test_that("make_standata handles covariance matrices correctly", { A <- structure(diag(1, 4), dimnames = list(1:4, NULL)) sdata <- make_standata(count ~ Trt + (1|gr(visit, cov = A)), data = epilepsy, data2 = list(A = A)) expect_equivalent(sdata$Lcov_1, t(chol(A))) B <- structure(diag(1:5), dimnames = list(c(1,5,2,4,3), NULL)) sdata <- make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)) expect_equivalent(sdata$Lcov_1, t(chol(B[c(1,3,5,4), c(1,3,5,4)]))) B <- diag(1, 4) expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "Row or column names are required") B <- structure(diag(1, 4), dimnames = list(2:5, NULL)) expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "Levels of .* do not match") B <- A B[1,2] <- 0.5 expect_error(make_standata(count ~ Trt + (1|gr(visit, cov = B)), data = epilepsy, data2 = list(B = B)), "must be symmetric") expect_warning( sdata <- make_standata(count ~ Trt + (1|visit), data = epilepsy, cov_ranef = list(visit = A)), "Argument 'cov_ranef' is deprecated" ) expect_equivalent(sdata$Lcov_1, t(chol(A))) }) test_that("make_standata correctly prepares data for non-linear models", { flist <- list(a ~ x + (1|1|g), b ~ mo(z) + (1|1|g)) dat <- data.frame( y = rnorm(9), x = rnorm(9), z = sample(1:9, 9), g = rep(1:3, 3) ) bform <- bf(y ~ a - b^z, flist = flist, nl = TRUE) sdata <- make_standata(bform, data = dat) expect_equal(names(sdata), c("N", "Y", "C_1", "K_a", "X_a", "Z_1_a_1", "K_b", "X_b", "Ksp_b", "Imo_b", "Xmo_b_1", "Jmo_b", "con_simo_b_1", "Z_1_b_2", "J_1", "N_1", "M_1", "NC_1", "prior_only") ) expect_equal(colnames(sdata$X_a), c("Intercept", "x")) expect_equal(sdata$J_1, as.array(dat$g)) bform <- bf(y ~ x) + nlf(sigma ~ a1 * exp(-x/(a2 + z))) + lf(a1 ~ 1, a2 ~ z + (x|g)) + lf(alpha ~ x) sdata <- make_standata(bform, dat, family = skew_normal()) sdata_names <- c("C_sigma_1", "X_a2", "Z_1_a2_1") expect_true(all(sdata_names %in% names(sdata))) }) test_that("make_standata correctly prepares data for monotonic effects", { data <- data.frame( y = rpois(120, 10), 
x1 = rep(1:4, 30), z = rnorm(10), x2 = factor(rep(c("a", "b", "c"), 40), ordered = TRUE) ) sdata <- make_standata(y ~ mo(x1)*mo(x2)*y, data = data) sdata_names <- c("Xmo_1", "Imo", "Jmo", "con_simo_8", "con_simo_5") expect_true(all(sdata_names %in% names(sdata))) expect_equivalent(sdata$Xmo_1, as.array(data$x1 - 1)) expect_equivalent(sdata$Xmo_2, as.array(as.numeric(data$x2) - 1)) expect_equal( as.vector(unname(sdata$Jmo)), rep(c(max(data$x1) - 1, length(unique(data$x2)) - 1), 4) ) expect_equal(sdata$con_simo_1, as.array(rep(1, 3))) prior <- set_prior("dirichlet(1:3)", coef = "mox11", class = "simo", dpar = "sigma") sdata <- make_standata(bf(y ~ 1, sigma ~ mo(x1)), data = data, prior = prior) expect_equal(sdata$con_simo_sigma_1, as.array(1:3)) prior <- c( set_prior("normal(0,1)", class = "b", coef = "mox1"), set_prior("dirichlet(c(1, 0.5, 2))", class = "simo", coef = "mox11"), prior_(~dirichlet(c(1, 0.5, 2)), class = "simo", coef = "mox1:mox21") ) sdata <- make_standata(y ~ mo(x1)*mo(x2), data = data, prior = prior) expect_equal(sdata$con_simo_1, as.array(c(1, 0.5, 2))) expect_equal(sdata$con_simo_3, as.array(c(1, 0.5, 2))) # test issue #924 (conditional monotonicity) prior <- c(prior(dirichlet(c(1, 0.5, 2)), simo, coef = "v"), prior(dirichlet(c(1,3)), simo, coef = "w")) sdata <- make_standata(y ~ y*mo(x1, id = "v")*mo(x2, id = "w"), data, prior = prior) expect_equal(sdata$con_simo_1, as.array(c(1, 0.5, 2))) expect_equal(sdata$con_simo_2, as.array(c(1, 3))) expect_true(!"sdata$con_simo_3" %in% names(sdata)) expect_error( make_standata(y ~ mo(z), data = data), "Monotonic predictors must be integers or ordered factors" ) prior <- c(set_prior("dirichlet(c(1,0.5,2))", class = "simo", coef = "mox21")) expect_error( make_standata(y ~ mo(x2), data = data, prior = prior), "Invalid Dirichlet prior" ) }) test_that("make_standata returns FCOR covariance matrices", { data <- data.frame(y = 1:5) data2 <- list(V = diag(5)) expect_equal(make_standata(y ~ fcor(V), data, data2 = data2)$Mfcor, data2$V, check.attributes = FALSE) expect_warning( expect_error( make_standata(y~1, data, autocor = cor_fixed(diag(2))), "Dimensions of 'M' for FCOR terms must be equal" ), "Using 'cor_brms' objects for 'autocor' is deprecated" ) }) test_that("make_standata returns data for GAMMs", { dat <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10), x3 = rnorm(10), z = rnorm(10), g = factor(rep(1:2, 5))) sdata <- make_standata(y ~ s(x1) + z + s(x2, by = x3), data = dat) expect_equal(sdata$nb_1, 1) expect_equal(as.vector(sdata$knots_2), 8) expect_equal(dim(sdata$Zs_1_1), c(10, 8)) expect_equal(dim(sdata$Zs_2_1), c(10, 8)) bform <- bf(y ~ lp, lp ~ s(x1) + z + s(x2, by = x3), nl = TRUE) sdata <- make_standata(bform, dat) expect_equal(sdata$nb_lp_1, 1) expect_equal(as.vector(sdata$knots_lp_2), 8) expect_equal(dim(sdata$Zs_lp_1_1), c(10, 8)) expect_equal(dim(sdata$Zs_lp_2_1), c(10, 8)) sdata <- make_standata(y ~ g + s(x2, by = g), data = dat) expect_true(all(c("knots_1", "knots_2") %in% names(sdata))) # test issue #562 dat$g <- as.character(dat$g) sdata <- make_standata(y ~ g + s(x2, by = g), data = dat) expect_true(all(c("knots_1", "knots_2") %in% names(sdata))) sdata <- make_standata(y ~ t2(x1, x2), data = dat) expect_equal(sdata$nb_1, 3) expect_equal(as.vector(sdata$knots_1), c(9, 6, 6)) expect_equal(dim(sdata$Zs_1_1), c(10, 9)) expect_equal(dim(sdata$Zs_1_3), c(10, 6)) expect_error(make_standata(y ~ te(x1, x2), data = dat), "smooths 'te' and 'ti' are not yet implemented") }) test_that("make_standata returns correct group 
ID data", { form <- bf(count ~ Trt + (1+Trt|3|visit) + (1|patient), shape ~ (1|3|visit) + (Trt||patient)) sdata <- make_standata(form, data = epilepsy, family = negbinomial()) expect_true(all(c("Z_1_1", "Z_2_2", "Z_3_shape_1", "Z_2_shape_3") %in% names(sdata))) form <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt||patient), a ~ Trt + (1+Trt|3|visit) + (1|patient), nl = TRUE) sdata <- make_standata(form, data = epilepsy, family = student()) expect_true(all(c("Z_1_sigma_1", "Z_2_a_3", "Z_2_sigma_1", "Z_3_a_1") %in% names(sdata))) }) test_that("make_standata handles population-level intercepts", { dat <- data.frame(y = 10:1, x = 1:10) sdata <- make_standata(y ~ 0 + x, data = dat) expect_equal(unname(sdata$X[, 1]), dat$x) sdata <- make_standata(y ~ x, dat, cumulative(), control = list(not4stan = TRUE)) expect_equal(unname(sdata$X[, 1]), dat$x) sdata <- make_standata(y ~ 0 + Intercept + x, data = dat) expect_equal(unname(sdata$X), cbind(1, dat$x)) }) test_that("make_standata handles category specific effects", { sdata <- make_standata(rating ~ period + carry + cse(treat), data = inhaler, family = sratio()) expect_equivalent(sdata$Xcs, matrix(inhaler$treat)) sdata <- make_standata(rating ~ period + carry + cs(treat) + (cs(1)|subject), data = inhaler, family = acat()) expect_equivalent(sdata$Z_1_3, as.array(rep(1, nrow(inhaler)))) sdata <- make_standata(rating ~ period + carry + (cs(treat)|subject), data = inhaler, family = cratio()) expect_equivalent(sdata$Z_1_4, as.array(inhaler$treat)) expect_warning( make_standata(rating ~ 1 + cs(treat), data = inhaler, family = "cumulative"), "Category specific effects for this family should be considered experimental" ) expect_error(make_standata(rating ~ 1 + (treat + cs(1)|subject), data = inhaler, family = "cratio"), "category specific effects in separate group-level terms") }) test_that("make_standata handles wiener diffusion models", { dat <- data.frame(q = 1:10, resp = sample(0:1, 10, TRUE), x = rnorm(10)) dat$dec <- ifelse(dat$resp == 0, "lower", "upper") dat$test <- "a" sdata <- make_standata(q | dec(resp) ~ x, data = dat, family = wiener()) expect_equal(sdata$dec, as.array(dat$resp)) sdata <- make_standata(q | dec(dec) ~ x, data = dat, family = wiener()) expect_equal(sdata$dec, as.array(dat$resp)) expect_error(make_standata(q | dec(test) ~ x, data = dat, family = wiener()), "Decisions should be 'lower' or 'upper'") }) test_that("make_standata handles noise-free terms", { N <- 30 dat <- data.frame( y = rnorm(N), x = rnorm(N), z = rnorm(N), xsd = abs(rnorm(N, 1)), zsd = abs(rnorm(N, 1)), ID = rep(1:5, each = N / 5) ) sdata <- make_standata( bf(y ~ me(x, xsd)*me(z, zsd)*x, sigma ~ me(x, xsd)), data = dat ) expect_equal(sdata$Xn_1, as.array(dat$x)) expect_equal(sdata$noise_2, as.array(dat$zsd)) expect_equal(unname(sdata$Csp_3), as.array(dat$x)) expect_equal(sdata$Ksp, 6) expect_equal(sdata$NCme_1, 1) }) test_that("make_standata handles noise-free terms with grouping factors", { dat <- data.frame( y = rnorm(10), x1 = rep(1:5, each = 2), sdx = rep(1:5, each = 2), g = rep(c("b", "c", "a", "d", 1), each = 2) ) sdata <- make_standata(y ~ me(x1, sdx, gr = g), dat) expect_equal(unname(sdata$Xn_1), as.array(c(5, 3, 1, 2, 4))) expect_equal(unname(sdata$noise_1), as.array(c(5, 3, 1, 2, 4))) dat$sdx[2] <- 10 expect_error( make_standata(y ~ me(x1, sdx, gr = g), dat), "Measured values and measurement error should be unique" ) }) test_that("make_standata handles missing value terms", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10) miss <- c(1, 3, 9) 
dat$x[miss] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi() ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_x, as.array(miss)) expect_true(all(is.infinite(sdata$Y_x[miss]))) # dots in variable names are correctly handled #452 dat$x.2 <- dat$x bform <- bf(y ~ mi(x.2)*g) + bf(x.2 | mi() ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_x, as.array(miss)) dat$z <- rbeta(10, 1, 1) dat$z[miss] <- NA bform <- bf(exp(y) ~ mi(z)*g) + bf(z | mi() ~ g, family = Beta()) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jmi_z, as.array(miss)) }) test_that("make_standata handles overimputation", { dat = data.frame(y = rnorm(10), x = rnorm(10), g = 1:10, sdy = 1) miss <- c(1, 3, 9) dat$x[miss] <- dat$sdy[miss] <- NA bform <- bf(y ~ mi(x)*g) + bf(x | mi(sdy) ~ g) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_equal(sdata$Jme_x, as.array(setdiff(1:10, miss))) expect_true(all(is.infinite(sdata$Y_x[miss]))) expect_true(all(is.infinite(sdata$noise_x[miss]))) }) test_that("make_standata handles mi terms with 'subset'", { dat <- data.frame( y = rnorm(10), x = c(rnorm(9), NA), z = rnorm(10), g1 = sample(1:5, 10, TRUE), g2 = 10:1, g3 = 1:10, s = c(FALSE, rep(TRUE, 9)) ) bform <- bf(y ~ mi(x, idx = g1)) + bf(x | mi() + index(g2) + subset(s) ~ 1) + set_rescor(FALSE) sdata <- make_standata(bform, dat) expect_true(all(sdata$idxl_y_x_1 %in% 9:5)) # test a bunch of errors bform <- bf(y ~ mi(x, idx = g1)) + bf(x | mi() + index(g3) + subset(s) ~ 1) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "Could not match all indices in response 'x'" ) bform <- bf(y ~ mi(x, idx = g1)) + bf(x | mi() + subset(s) ~ 1) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "Response 'x' needs to have an 'index' addition term" ) bform <- bf(y ~ mi(x)) + bf(x | mi() + subset(s) + index(g2) ~ 1) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "mi() terms of subsetted variables require the 'idx' argument", fixed = TRUE ) bform <- bf(y | mi() ~ mi(x, idx = g1)) + bf(x | mi() + subset(s) + index(g2) ~ mi(y)) + set_rescor(FALSE) expect_error(make_standata(bform, dat), "mi() terms in subsetted formulas require the 'idx' argument", fixed = TRUE ) }) test_that("make_standata handles multi-membership models", { dat <- data.frame(y = rnorm(10), g1 = c(7:2, rep(10, 4)), g2 = 1:10, w1 = rep(1, 10), w2 = rep(abs(rnorm(10)))) sdata <- make_standata(y ~ (1|mm(g1,g2,g1,g2)), data = dat) expect_true(all(paste0(c("W_1_", "J_1_"), 1:4) %in% names(sdata))) expect_equal(sdata$W_1_4, as.array(rep(0.25, 10))) expect_equal(unname(sdata$Z_1_1_1), as.array(rep(1, 10))) expect_equal(unname(sdata$Z_1_1_2), as.array(rep(1, 10))) # this checks whether combintation of factor levels works as intended expect_equal(sdata$J_1_1, as.array(c(6, 5, 4, 3, 2, 1, 7, 7, 7, 7))) expect_equal(sdata$J_1_2, as.array(c(8, 1, 2, 3, 4, 5, 6, 9, 10, 7))) sdata <- make_standata(y ~ (1|mm(g1,g2, weights = cbind(w1, w2))), dat) expect_equal(sdata$W_1_1, as.array(dat$w1 / (dat$w1 + dat$w2))) # tests mmc terms sdata <- make_standata(y ~ (1+mmc(w1, w2)|mm(g1,g2)), data = dat) expect_equal(unname(sdata$Z_1_2_1), as.array(dat$w1)) expect_equal(unname(sdata$Z_1_2_2), as.array(dat$w2)) expect_error( make_standata(y ~ (mmc(w1, w2, y)|mm(g1,g2)), data = dat), "Invalid term 'mmc(w1, w2, y)':", fixed = TRUE ) expect_error( make_standata(y ~ (mmc(w1, w2)*y|mm(g1,g2)), data = dat), "The term 'mmc(w1, w2):y' is invalid", fixed = TRUE ) # tests if ":" works in 
multi-membership models sdata <- make_standata(y ~ (1|mm(w1:g1,w1:g2)), dat) expect_true(all(c("J_1_1", "J_1_2") %in% names(sdata))) }) test_that("by variables in grouping terms are handled correctly", { gvar <- c("1A", "1B", "2A", "2B", "3A", "3B", "10", "100", "2", "3") gvar <- rep(gvar, each = 10) g_order <- order(gvar) byvar <- c(0, 4.5, 3, 2, "x 1") byvar <- factor(rep(byvar, each = 20)) dat <- data.frame( y = rnorm(100), x = rnorm(100), g = gvar, g2 = gvar[g_order], z = byvar, z2 = byvar[g_order], z3 = factor(1:2) ) sdata <- make_standata(y ~ x + (x | gr(g, by = z)), dat) expect_equal(sdata$Nby_1, 5) expect_equal(sdata$Jby_1, as.array(c(2, 2, 1, 1, 5, 4, 4, 5, 3, 3))) sdata <- make_standata(y ~ x + (x | mm(g, g2, by = cbind(z, z2))), dat) expect_equal(sdata$Nby_1, 5) expect_equal(sdata$Jby_1, as.array(c(2, 2, 1, 1, 5, 4, 4, 5, 3, 3))) expect_error(make_standata(y ~ x + (1|gr(g, by = z3)), dat), "Some levels of 'g' correspond to multiple levels of 'z3'") }) test_that("make_standata handles calls to the 'poly' function", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) expect_equal(colnames(make_standata(y ~ 1 + poly(x, 3), dat)$X), c("Intercept", "polyx31", "polyx32", "polyx33")) }) test_that("make_standata allows fixed distributional parameters", { dat <- list(y = 1:10) expect_equal(make_standata(bf(y ~ 1, nu = 3), dat, student())$nu, 3) expect_equal(make_standata(y ~ 1, dat, acat())$disc, 1) expect_error(make_standata(bf(y ~ 1, bias = 0.5), dat), "Invalid fixed parameters: 'bias'") }) test_that("Cell-mean coding can be disabled", { df <- data.frame(y = 1:10, g = rep(c("a", "b"), 5)) bform <- bf(y ~ g) + lf(disc ~ 0 + g + (0 + g | y), cmc = FALSE) + cumulative() sdata <- make_standata(bform, df) target <- matrix(rep(0:1, 5), dimnames = list(1:10, "gb")) expect_equal(sdata$X_disc, target) expect_equal(unname(sdata$Z_1_disc_1), as.array(rep(0:1, 5))) expect_true(!"Z_1_disc_2" %in% names(sdata)) bform <- bf(y ~ 0 + g + (1 | y), cmc = FALSE) sdata <- make_standata(bform, df) expect_equal(sdata$X, target) expect_equal(unname(sdata$Z_1_1), as.array(rep(1, 10))) }) test_that("make_standata correctly includes offsets", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) sdata <- make_standata(bf(y ~ x + offset(c), sigma ~ offset(c + 1)), data) expect_equal(sdata$offsets, as.array(data$c)) expect_equal(sdata$offsets_sigma, as.array(data$c + 1)) sdata <- make_standata(y ~ x + offset(c) + offset(x), data) expect_equal(sdata$offsets, as.array(data$c + data$x)) }) test_that("make_standata includes data for mixture models", { data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1) form <- bf(y ~ x, mu1 ~ 1, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data) expect_equal(sdata$con_theta, as.array(c(1, 1))) expect_equal(dim(sdata$X_mu1), c(10, 1)) expect_equal(dim(sdata$X_mu2), c(10, 2)) form <- bf(y ~ x, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data, prior = prior(dirichlet(10, 2), theta)) expect_equal(sdata$con_theta, as.array(c(10, 2))) form <- bf(y ~ x, theta1 = 1, theta2 = 3, family = mixture(gaussian, gaussian)) sdata <- make_standata(form, data) expect_equal(sdata$theta1, 1/4) expect_equal(sdata$theta2, 3/4) }) test_that("make_standata includes data for Gaussian processes", { dat <- data.frame(y = rnorm(10), x1 = rnorm(10), z = factor(c(2, 2, 2, 3, 4, rep(5, 5)))) sdata <- make_standata(y ~ gp(x1), dat) expect_equal(max(sdata$Xgp_1) - min(sdata$Xgp_1), 1) sdata <- make_standata(y ~ gp(x1, scale = FALSE), dat) expect_equal(max(sdata$Xgp_1) 
- min(sdata$Xgp_1), max(dat$x1) - min(dat$x1)) sdata <- SW(make_standata(y ~ gp(x1, by = z, gr = TRUE, scale = FALSE), dat)) expect_equal(sdata$Igp_1_2, as.array(4)) expect_equal(sdata$Jgp_1_4, as.array(1:5)) expect_equal(sdata$Igp_1_4, as.array(6:10)) sdata <- SW(make_standata(y ~ gp(x1, by = y, gr = TRUE), dat)) expect_equal(sdata$Cgp_1, as.array(dat$y)) }) test_that("make_standata includes data for approximate Gaussian processes", { dat <- data.frame(y = rnorm(10), x1 = sample(1:10, 10), z = factor(c(2, 2, 2, 3, 4, rep(5, 5)))) sdata <- make_standata(y ~ gp(x1, k = 5, c = 5/4), dat) expect_equal(sdata$NBgp_1, 5) expect_equal(dim(sdata$Xgp_1), c(10, 5)) expect_equal(dim(sdata$slambda_1), c(5, 1)) sdata <- SW(make_standata(y ~ gp(x1, by = z, k = 5, c = 5/4, scale = FALSE), dat)) expect_equal(sdata$Igp_1_2, as.array(4)) expect_equal(sdata$Cgp_1_2, as.array(1)) expect_equal(sdata$Igp_1_4, as.array(6:10)) }) test_that("make_standata includes data for SAR models", { dat <- data.frame(y = rnorm(10), x = rnorm(10)) W <- matrix(0, nrow = 10, ncol = 10) dat2 <- list(W = W) sdata <- make_standata(y ~ x + sar(W), data = dat, data2 = dat2) expect_equal(dim(sdata$M), rep(nrow(W), 2)) dat2 <- list(W = matrix(0, 2, 2)) expect_error( make_standata(y ~ x + sar(W), data = dat, data2 = dat2), "Dimensions of 'M' for SAR terms must be equal" ) }) test_that("make_standata includes data for CAR models", { dat = data.frame(y = rnorm(10), x = rnorm(10), obs = 1:10) edges <- cbind(1:10, 10:1) W <- matrix(0, nrow = 10, ncol = 10) for (i in seq_len(nrow(edges))) { W[edges[i, 1], edges[i, 2]] <- 1 } rownames(W) <- 1:nrow(W) dat2 <- list(W = W) sdata <- make_standata(y ~ x + car(W, gr = obs), dat, data2 = dat2) expect_equal(sdata$Nloc, 10) expect_equal(unname(sdata$Nneigh), rep(1, 10)) expect_equal(unname(sdata$edges1), as.array(10:6)) expect_equal(unname(sdata$edges2), as.array(1:5)) sdata_old <- SW(make_standata(y ~ x, dat, autocor = cor_car(W))) expect_equal(sdata, sdata_old) rownames(dat2$W) <- c("a", 2:9, "b") dat$group <- rep(c("a", "b"), each = 5) sdata <- make_standata(y ~ x + car(W, gr = group), dat, data2 = dat2) expect_equal(sdata$Nloc, 2) expect_equal(sdata$edges1, as.array(2)) expect_equal(sdata$edges2, as.array(1)) sdata <- make_standata(y ~ x + car(W, group, type = "bym2"), data = dat, data2 = dat2) expect_equal(length(sdata$car_scale), 1L) dat2$W[1, 10] <- 4 dat2$W[10, 1] <- 4 expect_message(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Converting all non-zero values in 'M' to 1") # test error messages rownames(dat2$W) <- c(1:9, "a") expect_error(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Row names of 'M' for CAR terms do not match") rownames(dat2$W) <- NULL expect_error(make_standata(y ~ car(W, gr = group), dat, data2 = dat2), "Row names are required for 'M'") dat2$W[1, 10] <- 0 expect_error(make_standata(y ~ car(W), dat, data2 = dat2), "'M' for CAR terms must be symmetric") dat2$W[10, 1] <- 0 expect_error(SW(make_standata(y ~ x + car(W), dat, data2 = dat2)), "all locations should have at least one neighbor") }) test_that("make_standata includes data of special priors", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10)) # horseshoe prior hs <- horseshoe(7, scale_global = 2, df_global = 3, df_slab = 6, scale_slab = 3) sdata <- make_standata(y ~ x1*x2, data = dat, prior = set_prior(hs)) expect_equal(sdata$hs_df, 7) expect_equal(sdata$hs_df_global, 3) expect_equal(sdata$hs_df_slab, 6) expect_equal(sdata$hs_scale_global, 2) expect_equal(sdata$hs_scale_slab, 3) hs 
<- horseshoe(par_ratio = 0.1) sdata <- make_standata(y ~ x1*x2, data = dat, prior = set_prior(hs)) expect_equal(sdata$hs_scale_global, 0.1 / sqrt(nrow(dat))) # R2D2 prior sdata <- make_standata(y ~ x1*x2, data = dat, prior = prior(R2D2(0.5, 10))) expect_equal(sdata$R2D2_mean_R2, 0.5) expect_equal(sdata$R2D2_prec_R2, 10) expect_equal(sdata$R2D2_cons_D2, as.array(rep(1, 3))) # lasso prior sdata <- make_standata(y ~ x1*x2, data = dat, prior = prior(lasso(2, scale = 10))) expect_equal(sdata$lasso_df, 2) expect_equal(sdata$lasso_scale, 10) # horseshoe and lasso prior applied in a non-linear model hs_a1 <- horseshoe(7, scale_global = 2, df_global = 3) lasso_a2 <- lasso(2, scale = 10) sdata <- make_standata( bf(y ~ a1 + a2, a1 ~ x1, a2 ~ 0 + x2, nl = TRUE), data = dat, sample_prior = TRUE, prior = c(set_prior(hs_a1, nlpar = "a1"), set_prior(lasso_a2, nlpar = "a2")) ) expect_equal(sdata$hs_df_a1, 7) expect_equal(sdata$lasso_df_a2, 2) }) test_that("dots in formula are correctly expanded", { dat <- data.frame(y = 1:10, x1 = 1:10, x2 = 1:10) sdata <- make_standata(y ~ ., dat) expect_equal(colnames(sdata$X), c("Intercept", "x1", "x2")) }) test_that("argument 'stanvars' is handled correctly", { bprior <- prior(normal(mean_intercept, 10), class = "Intercept") mean_intercept <- 5 stanvars <- stanvar(mean_intercept) sdata <- make_standata(count ~ Trt, data = epilepsy, prior = bprior, stanvars = stanvars) expect_equal(sdata$mean_intercept, 5) # define a multi_normal prior with known covariance matrix bprior <- prior(multi_normal(M, V), class = "b") stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") + stanvar(diag(2), "V", scode = " matrix[K, K] V;") sdata <- make_standata(count ~ Trt + zBase, epilepsy, prior = bprior, stanvars = stanvars) expect_equal(sdata$M, rep(0, 2)) expect_equal(sdata$V, diag(2)) }) test_that("addition arguments 'vint' and 'vreal' work correctly", { dat <- data.frame(size = 10, y = sample(0:10, 20, TRUE), x = rnorm(20)) beta_binomial2 <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = c("vint1[n]", "vreal1[n]") ) sdata <- make_standata( y | vint(size) + vreal(x, size) ~ 1, data = dat, family = beta_binomial2, ) expect_equal(sdata$vint1, as.array(rep(10, 20))) expect_equal(sdata$vreal1, as.array(dat$x)) expect_equal(sdata$vreal2, as.array(rep(10, 20))) }) test_that("reserved variables 'Intercept' is handled correctly", { dat <- data.frame(y = 1:10) expect_warning( sdata <- make_standata(y ~ 0 + intercept, dat), "Reserved variable name 'intercept' is deprecated." 
) expect_true(all(sdata$X[, "intercept"] == 1)) sdata <- make_standata(y ~ 0 + Intercept, dat) expect_true(all(sdata$X[, "Intercept"] == 1)) }) test_that("data for multinomial and dirichlet models is correct", { N <- 15 dat <- as.data.frame(rdirichlet(N, c(3, 2, 1))) names(dat) <- c("y1", "y2", "y3") dat$t1 <- round(dat$y1 * rpois(N, 10)) dat$t2 <- round(dat$y2 * rpois(N, 10)) dat$t3 <- round(dat$y3 * rpois(N, 10)) dat$x <- rnorm(N) dat$y <- with(dat, cbind(y1, y2, y3)) dat$t <- with(dat, cbind(t1, t2, t3)) dat$size <- rowSums(dat$t) sdata <- make_standata(t | trials(size) ~ x, dat, multinomial()) expect_equal(sdata$trials, as.array(dat$size)) expect_equal(sdata$ncat, 3) expect_equal(sdata$Y, unname(dat$t)) sdata <- make_standata(y ~ x, data = dat, family = dirichlet()) expect_equal(sdata$ncat, 3) expect_equal(sdata$Y, unname(dat$y)) expect_error( make_standata(t | trials(10) ~ x, data = dat, family = multinomial()), "Number of trials does not match the number of events" ) expect_error(make_standata(t ~ x, data = dat, family = dirichlet()), "Response values in dirichlet models must sum to 1") }) test_that("make_standata handles cox models correctly", { data <- data.frame(y = rexp(100), x = rnorm(100)) bform <- bf(y ~ x) bprior <- prior(dirichlet(3), sbhaz) sdata <- make_standata(bform, data, brmsfamily("cox"), prior = bprior) expect_equal(dim(sdata$Zbhaz), c(100, 5)) expect_equal(dim(sdata$Zcbhaz), c(100, 5)) expect_equal(sdata$con_sbhaz, as.array(rep(3, 5))) sdata <- make_standata(bform, data, brmsfamily("cox", bhaz = list(df = 6))) expect_equal(dim(sdata$Zbhaz), c(100, 6)) expect_equal(dim(sdata$Zcbhaz), c(100, 6)) }) test_that("make_standata handles addition term 'rate' is correctly", { data <- data.frame(y = rpois(10, 1), x = rnorm(10), time = 1:10) sdata <- make_standata(y | rate(time) ~ x, data, poisson()) expect_equal(sdata$denom, as.array(data$time)) }) test_that("make_standata handles grouped ordinal thresholds correctly", { dat <- data.frame( y = c(1:5, 1:4, 4), gr = rep(c("a", "b"), each = 5), th = rep(5:6, each = 5), x = rnorm(10) ) # thresholds without a grouping factor sdata <- make_standata(y ~ x, dat, cumulative()) expect_equal(sdata$nthres, 4) sdata <- make_standata(y | thres(5) ~ x, dat, cumulative()) expect_equal(sdata$nthres, 5) expect_error( make_standata(y | thres(th) ~ x, dat, cumulative()), "Number of thresholds needs to be a single value" ) # thresholds with a grouping factor sdata <- make_standata(y | thres(th, gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(5, 6))) expect_equal(sdata$ngrthres, 2) expect_equal(unname(sdata$Jthres[1, ]), c(1, 5)) expect_equal(unname(sdata$Jthres[10, ]), c(6, 11)) sdata <- make_standata(y | thres(gr = gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(4, 3))) expect_equal(sdata$ngrthres, 2) sdata <- make_standata(y | thres(6, gr = gr) ~ x, dat, cumulative()) expect_equal(sdata$nthres, as.array(c(6, 6))) expect_equal(sdata$ngrthres, 2) }) test_that("information for threading is handled correctly", { dat <- data.frame(y = 1:10) sdata <- make_standata(y ~ 1, dat, threads = threading(2, grainsize = 3)) expect_equal(sdata$grainsize, 3) }) test_that("variables in data2 can be used in population-level effects", { dat <- data.frame(y = 1:10, x1 = rnorm(10), x2 = rnorm(10), x3 = rnorm(10)) foo <- function(..., idx = NULL) { out <- cbind(...) 
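# keep only the requested columns when 'idx' is supplied (passed via 'data2' below)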
if (!is.null(idx)) { out <- out[, idx, drop = FALSE] } out } sdata <- make_standata(y ~ foo(x1, x2, x3, idx = id), data = dat, data2 = list(id = c(3, 1))) target <- c("Intercept", "foox1x2x3idxEQidx3", "foox1x2x3idxEQidx1") expect_equal(colnames(sdata$X), target) expect_equivalent(sdata$X[, 2], dat$x3) expect_equivalent(sdata$X[, 3], dat$x1) }) test_that("NAs are allowed in unused interval censoring variables", { dat <- data.frame(y = rnorm(10), ce = c(1, rep(2, 9))) dat$y2 <- dat$y + 2 dat$y2[1] <- NA sdata <- make_standata(y | cens(ce, y2 = y2) ~ 1, data = dat) expect_equal(sdata$N, 10L) expect_equal(sdata$rcens[1], 0) dat$ce[1] <- 2 expect_error( make_standata(y | cens(ce, y2 = y2) ~ 1, data = dat), "'y2' should not be NA for interval censored observations" ) }) brms/tests/testthat/tests.brmsfit-methods.R0000644000175000017500000010523114111751667020757 0ustar nileshnileshcontext("Tests for brmsfit methods") # to reduce testing time on CRAN substantially skip_on_cran() expect_range <- function(object, lower = -Inf, upper = Inf, ...) { testthat::expect_true(all(object >= lower & object <= upper), ...) } expect_ggplot <- function(object, ...) { testthat::expect_true(is(object, "ggplot"), ...) } SM <- suppressMessages SW <- suppressWarnings fit1 <- rename_pars(brms:::brmsfit_example1) fit2 <- rename_pars(brms:::brmsfit_example2) fit3 <- rename_pars(brms:::brmsfit_example3) fit4 <- rename_pars(brms:::brmsfit_example4) fit5 <- rename_pars(brms:::brmsfit_example5) fit6 <- rename_pars(brms:::brmsfit_example6) # test S3 methods in alphabetical order test_that("as_draws and friends have resonable outputs", { draws <- as_draws(fit1, variable = "b_Intercept") expect_s3_class(draws, "draws_list") expect_equal(variables(draws), "b_Intercept") expect_equal(ndraws(draws), ndraws(fit1)) draws <- SM(as_draws_matrix(fit1)) expect_s3_class(draws, "draws_matrix") expect_equal(ndraws(draws), ndraws(fit1)) draws <- as_draws_array(fit2) expect_s3_class(draws, "draws_array") expect_equal(niterations(draws), ndraws(fit2)) draws <- as_draws_df(fit2, variable = "^b_", regex = TRUE) expect_s3_class(draws, "draws_df") expect_true(all(grepl("^b_", variables(draws)))) draws <- as_draws_list(fit2) expect_s3_class(draws, "draws_list") expect_equal(nchains(draws), nchains(fit2)) draws <- as_draws_rvars(fit3) expect_s3_class(draws, "draws_rvars") expect_equal(ndraws(draws), ndraws(fit3)) expect_true(length(variables(draws)) > 0) }) test_that("as.data.frame has reasonable ouputs", { draws <- as.data.frame(fit1) expect_true(is(draws, "data.frame")) expect_equal(dim(draws), c(ndraws(fit1), length(variables(fit1)))) # deprecated 'pars' argument still works expect_warning( draws <- as.data.frame(fit1, pars = "^b_"), "'pars' is deprecated" ) expect_s3_class(draws, "data.frame") expect_true(ncol(draws) > 0) # deprecated 'subset' argument still works expect_warning( draws <- as.data.frame(fit1, subset = 10:20), "'subset' is deprecated" ) expect_s3_class(draws, "data.frame") expect_equal(nrow(draws), 11) }) test_that("as.matrix has reasonable ouputs", { draws <- as.matrix(fit1, iteration = 1:10) expect_true(is(draws, "matrix")) expect_equal(dim(draws), c(10, length(variables(fit1)))) }) test_that("as.array has reasonable ouputs", { draws <- as.array(fit1) expect_true(is.array(draws)) chains <- fit1$fit@sim$chains ps_dim <- c(niterations(fit1), chains, length(variables(fit1))) expect_equal(dim(draws), ps_dim) draws <- as.array(fit1, chain = 1) expect_true(is.array(draws)) ps_dim <- c(niterations(fit1), 1, length(variables(fit1))) 
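# selecting a single chain keeps the iterations x chains x variables layout
# but collapses the chain dimension to length 1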
expect_equal(dim(draws), ps_dim) }) test_that("as.mcmc has reasonable ouputs", { chains <- fit1$fit@sim$chains mc <- SW(as.mcmc(fit1)) expect_equal(length(mc), chains) expect_equal(dim(mc[[1]]), c(ndraws(fit1) / chains, length(variables(fit1)))) mc <- SW(as.mcmc(fit1, combine_chains = TRUE)) expect_equal(dim(mc), c(ndraws(fit1), length(variables(fit1)))) # test assumes thin = 1 expect_equal(dim(SW(as.mcmc(fit1, inc_warmup = TRUE)[[1]])), c(fit1$fit@sim$iter, length(variables(fit1)))) }) test_that("autocor has reasonable ouputs", { expect_true(is.null(SW(autocor(fit1)))) expect_true(is.null(SW(autocor(fit6, resp = "count")))) }) test_that("bayes_R2 has reasonable ouputs", { fit1 <- add_criterion(fit1, "bayes_R2") R2 <- bayes_R2(fit1, summary = FALSE) expect_equal(dim(R2), c(ndraws(fit1), 1)) R2 <- bayes_R2(fit2, newdata = model.frame(fit2)[1:5, ], re_formula = NA) expect_equal(dim(R2), c(1, 4)) R2 <- bayes_R2(fit6) expect_equal(dim(R2), c(2, 4)) }) test_that("bayes_factor has reasonable ouputs", { # don't test for now as it requires calling Stan's C++ code }) test_that("bridge_sampler has reasonable ouputs", { # don't test for now as it requires calling Stan's C++ code }) test_that("coef has reasonable ouputs", { coef1 <- SM(coef(fit1)) expect_equal(dim(coef1$visit), c(4, 4, 9)) coef1 <- SM(coef(fit1, summary = FALSE)) expect_equal(dim(coef1$visit), c(ndraws(fit1), 4, 9)) coef2 <- SM(coef(fit2)) expect_equal(dim(coef2$patient), c(59, 4, 4)) coef4 <- SM(coef(fit4)) expect_equal(dim(coef4$subject), c(10, 4, 8)) }) test_that("combine_models has reasonable ouputs", { expect_equal(ndraws(combine_models(fit1, fit1)), ndraws(fit1) * 2) }) test_that("conditional_effects has reasonable ouputs", { me <- conditional_effects(fit1, resp = "count") expect_equal(nrow(me[[2]]), 100) meplot <- plot(me, points = TRUE, rug = TRUE, ask = FALSE, plot = FALSE) expect_ggplot(meplot[[1]]) me <- conditional_effects(fit1, "Trt", select_points = 0.1) expect_lt(nrow(attr(me[[1]], "points")), nobs(fit1)) me <- conditional_effects(fit1, "volume:Age", surface = TRUE, resolution = 15, too_far = 0.2) meplot <- plot(me, plot = FALSE) expect_ggplot(meplot[[1]]) meplot <- plot(me, stype = "raster", plot = FALSE) expect_ggplot(meplot[[1]]) me <- conditional_effects(fit1, "Age", spaghetti = TRUE, ndraws = 10) expect_equal(nrow(attr(me$Age, "spaghetti")), 1000) meplot <- plot(me, plot = FALSE) expect_ggplot(meplot[[1]]) expect_error( conditional_effects(fit1, "Age", spaghetti = TRUE, surface = TRUE), "Cannot use 'spaghetti' and 'surface' at the same time" ) me <- conditional_effects(fit1, effects = "Age:visit", re_formula = NULL) exp_nrow <- 100 * length(unique(fit1$data$visit)) expect_equal(nrow(me[[1]]), exp_nrow) mdata = data.frame( Age = c(-0.3, 0, 0.3), count = c(10, 20, 30), Exp = c(1, 3, 5) ) exp_nrow <- nrow(mdata) * 100 me <- conditional_effects(fit1, effects = "Age", conditions = mdata) expect_equal(nrow(me[[1]]), exp_nrow) mdata$visit <- 1:3 me <- conditional_effects(fit1, re_formula = NULL, conditions = mdata) expect_equal(nrow(me$Age), exp_nrow) me <- conditional_effects( fit1, "Age:Trt", int_conditions = list(Age = rnorm(5)) ) expect_equal(nrow(me[[1]]), 10) me <- conditional_effects( fit1, "Age:Trt", int_conditions = list(Age = quantile) ) expect_equal(nrow(me[[1]]), 10) expect_error(conditional_effects(fit1, effects = "Trtc"), "All specified effects are invalid for this model") expect_warning(conditional_effects(fit1, effects = c("Trtc", "Trt")), "Some specified effects are invalid for this model") 
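# interactions of more than two variables cannot be displayed directly;
# further variables have to be supplied via the 'conditions' argument,
# which is what the error below points the user to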
expect_error(conditional_effects(fit1, effects = "Trtc:a:b"), "please use the 'conditions' argument") mdata$visit <- NULL mdata$Exp <- NULL mdata$patient <- 1 expect_equal(nrow(conditional_effects(fit2)[[2]]), 100) me <- conditional_effects(fit2, re_formula = NULL, conditions = mdata) expect_equal(nrow(me$Age), exp_nrow) expect_warning( me4 <- conditional_effects(fit4), "Predictions are treated as continuous variables" ) expect_true(is(me4, "brms_conditional_effects")) me4 <- conditional_effects(fit4, "x2", categorical = TRUE) expect_true(is(me4, "brms_conditional_effects")) me5 <- conditional_effects(fit5) expect_true(is(me5, "brms_conditional_effects")) me6 <- conditional_effects(fit6, ndraws = 40) expect_true(is(me6, "brms_conditional_effects")) }) test_that("plot of conditional_effects has reasonable outputs", { SW(ggplot2::theme_set(theme_black())) N <- 90 marg_results <- data.frame( effect1__ = rpois(N, 20), effect2__ = factor(rep(1:3, each = N / 3)), estimate__ = rnorm(N, sd = 5), se__ = rt(N, df = 10), cond__ = rep(1:2, each = N / 2), cats__ = factor(rep(1:3, each = N / 3)) ) marg_results[["lower__"]] <- marg_results$estimate__ - 2 marg_results[["upper__"]] <- marg_results$estimate__ + 2 marg_results <- list(marg_results[order(marg_results$effect1__), ]) class(marg_results) <- "brms_conditional_effects" attr(marg_results[[1]], "response") <- "count" # test with 1 numeric predictor attr(marg_results[[1]], "effects") <- "P1" marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test with 1 categorical predictor attr(marg_results[[1]], "effects") <- "P2" marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test with 1 numeric and 1 categorical predictor attr(marg_results[[1]], "effects") <- c("P1", "P2") marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) # test ordinal raster plot attr(marg_results[[1]], "effects") <- c("P1", "cats__") attr(marg_results[[1]], "ordinal") <- TRUE marg_plot <- plot(marg_results, plot = FALSE) expect_ggplot(marg_plot[[1]]) }) test_that("conditional_smooths has reasonable ouputs", { ms <- conditional_smooths(fit1) expect_equal(nrow(ms[[1]]), 100) expect_true(is(ms, "brms_conditional_effects")) ms <- conditional_smooths(fit1, spaghetti = TRUE, ndraws = 10) expect_equal(nrow(attr(ms[[1]], "spaghetti")), 1000) expect_error(conditional_smooths(fit1, smooths = "s3"), "No valid smooth terms found in the model") expect_error(conditional_smooths(fit2), "No valid smooth terms found in the model") }) test_that("family has reasonable ouputs", { expect_is(family(fit1), "brmsfamily") expect_is(family(fit6, resp = "count"), "brmsfamily") expect_output(print(family(fit1), links = TRUE), "student.*log.*logm1") expect_output(print(family(fit5)), "Mixture.*gaussian.*exponential") }) test_that("fitted has reasonable outputs", { skip_on_cran() fi <- fitted(fit1) expect_equal(dim(fi), c(nobs(fit1), 4)) expect_equal(colnames(fi), c("Estimate", "Est.Error", "Q2.5", "Q97.5")) newdata <- data.frame( Age = c(0, -0.2), visit = c(1, 4), Trt = c(0, 1), count = c(20, 13), patient = c(1, 42), Exp = c(2, 4), volume = 0 ) fi <- fitted(fit1, newdata = newdata) expect_equal(dim(fi), c(2, 4)) newdata$visit <- c(1, 6) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(fi), c(2, 4)) # fitted values with new_levels newdata <- data.frame( Age = 0, visit = paste0("a", 1:100), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE, 
sample_new_levels = "old_levels", ndraws = 10) expect_equal(dim(fi), c(100, 4)) fi <- fitted(fit1, newdata = newdata, allow_new_levels = TRUE, sample_new_levels = "gaussian", ndraws = 1) expect_equal(dim(fi), c(100, 4)) # fitted values of auxiliary parameters newdata <- data.frame( Age = 0, visit = c("a", "b"), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) fi <- fitted(fit1, dpar = "sigma") expect_equal(dim(fi), c(nobs(fit1), 4)) expect_true(all(fi > 0)) fi_lin <- fitted(fit1, dpar = "sigma", scale = "linear") expect_equal(dim(fi_lin), c(nobs(fit1), 4)) expect_true(!isTRUE(all.equal(fi, fi_lin))) expect_error(fitted(fit1, dpar = "inv"), "Invalid argument 'dpar'") fi <- fitted(fit2) expect_equal(dim(fi), c(nobs(fit2), 4)) fi <- fitted(fit2, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(fi), c(2, 4)) fi <- fitted(fit2, dpar = "shape") expect_equal(dim(fi), c(nobs(fit2), 4)) expect_equal(fi[1, ], fi[2, ]) fi <- fitted(fit2, nlpar = "a") expect_equal(dim(fi), c(nobs(fit2), 4)) fi <- fitted(fit3, newdata = fit3$data[1:10, ]) expect_equal(dim(fi), c(10, 4)) fi <- fitted(fit4) expect_equal(dim(fi), c(nobs(fit4), 4, 4)) fi <- fitted(fit4, newdata = fit4$data[1, ]) expect_equal(dim(fi), c(1, 4, 4)) fi <- fitted(fit4, newdata = fit4$data[1, ], scale = "linear") expect_equal(dim(fi), c(1, 4, 3)) fi <- fitted(fit5) expect_equal(dim(fi), c(nobs(fit5), 4)) fi <- fitted(fit6) expect_equal(dim(fi), c(nobs(fit6), 4, 2)) expect_equal(dimnames(fi)[[3]], c("volume", "count")) }) test_that("fixef has reasonable ouputs", { fixef1 <- SM(fixef(fit1)) expect_equal(rownames(fixef1), c("Intercept", "sigma_Intercept", "Trt1", "Age", "volume", "Trt1:Age", "sigma_Trt1", "sAge_1", "moExp") ) fixef1 <- SM(fixef(fit1, pars = c("Age", "sAge_1"))) expect_equal(rownames(fixef1), c("Age", "sAge_1")) }) test_that("formula has reasonable ouputs", { expect_true(is.brmsformula(formula(fit1))) }) test_that("hypothesis has reasonable ouputs", { hyp <- hypothesis(fit1, c("Age > Trt1", "Trt1:Age = -1")) expect_equal(dim(hyp$hypothesis), c(2, 8)) expect_output(print(hyp), "(Age)-(Trt1) > 0", fixed = TRUE) expect_ggplot(plot(hyp, plot = FALSE)[[1]]) hyp <- hypothesis(fit1, "Intercept = 0", class = "sd", group = "visit") expect_true(is.numeric(hyp$hypothesis$Evid.Ratio[1])) expect_output(print(hyp), "class sd_visit:", fixed = TRUE) expect_ggplot(plot(hyp, ignore_prior = TRUE, plot = FALSE)[[1]]) hyp <- hypothesis(fit1, "0 > r_visit[4,Intercept]", class = "", alpha = 0.01) expect_equal(dim(hyp$hypothesis), c(1, 8)) expect_output(print(hyp, chars = NULL), "r_visit[4,Intercept]", fixed = TRUE) expect_output(print(hyp), "99%-CI", fixed = TRUE) hyp <- hypothesis( fit1, c("Intercept = 0", "Intercept + exp(Trt1) = 0"), group = "visit", scope = "coef" ) expect_equal(dim(hyp$hypothesis), c(8, 9)) expect_equal(hyp$hypothesis$Group[1], factor(1, levels = 1:4)) expect_error(hypothesis(fit1, "Intercept > x"), fixed = TRUE, "cannot be found in the model: \n'b_x'") expect_error(hypothesis(fit1, 1), "Argument 'hypothesis' must be a character vector") expect_error(hypothesis(fit2, "b_Age = 0", alpha = 2), "Argument 'alpha' must be a single value in [0,1]", fixed = TRUE) expect_error(hypothesis(fit3, "b_Age x 0"), "Every hypothesis must be of the form 'left (= OR < OR >) right'", fixed = TRUE) # test hypothesis.default method hyp <- hypothesis(as.data.frame(fit3), "bsp_meAgeAgeSD > sigma") expect_equal(dim(hyp$hypothesis), c(1, 8)) hyp <- hypothesis(fit3$fit, "bsp_meAgeAgeSD > sigma") expect_equal(dim(hyp$hypothesis), c(1, 8)) }) 
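# A minimal standalone sketch (simulated draws, not taken from any fitted
# model) of the quantities summarised by hypothesis() and checked above: for a
# one-sided hypothesis such as "b_Age > b_Trt1", Evid.Ratio is the ratio of
# posterior draws supporting the hypothesis to those contradicting it, and
# Post.Prob equals Evid.Ratio / (1 + Evid.Ratio).
sim_draws <- data.frame(b_Age = rnorm(4000, 0.2, 0.1), b_Trt1 = rnorm(4000, 0, 0.1))
sim_diff <- sim_draws$b_Age - sim_draws$b_Trt1
sim_evid_ratio <- mean(sim_diff > 0) / mean(sim_diff < 0)
sim_post_prob <- sim_evid_ratio / (1 + sim_evid_ratio)  # equals mean(sim_diff > 0)
# hypothesis(sim_draws, "b_Age > b_Trt1") reports the same two quantities via
# the default data.frame method exercised at the end of the test above.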
test_that("launch_shinystan has reasonable ouputs", { # requires running shiny which is not reasonable in automated tests }) test_that("log_lik has reasonable ouputs", { expect_equal(dim(log_lik(fit1)), c(ndraws(fit1), nobs(fit1))) expect_equal(dim(logLik(fit1)), c(ndraws(fit1), nobs(fit1))) expect_equal(dim(log_lik(fit2)), c(ndraws(fit2), nobs(fit2))) }) test_that("loo has reasonable outputs", { skip_on_cran() loo1 <- SW(LOO(fit1, cores = 1)) expect_true(is.numeric(loo1$estimates)) expect_output(print(loo1), "looic") loo_compare1 <- SW(loo(fit1, fit1, cores = 1)) expect_equal(names(loo_compare1$loos), c("fit1", "fit1")) expect_equal(dim(loo_compare1$ic_diffs__), c(1, 2)) expect_output(print(loo_compare1), "'fit1':") expect_is(loo_compare1$diffs, "compare.loo") loo2 <- SW(loo(fit2, cores = 1)) expect_true(is.numeric(loo2$estimates)) loo3 <- SW(loo(fit3, cores = 1)) expect_true(is.numeric(loo3$estimates)) loo3 <- SW(loo(fit3, pointwise = TRUE, cores = 1)) expect_true(is.numeric(loo3$estimates)) loo4 <- SW(loo(fit4, cores = 1)) expect_true(is.numeric(loo4$estimates)) # fails because of too small effective sample size # loo5 <- SW(loo(fit5, cores = 1)) # expect_true(is.numeric(loo5$estimates)) loo6_1 <- SW(loo(fit6, cores = 1)) expect_true(is.numeric(loo6_1$estimates)) loo6_2 <- SW(loo(fit6, cores = 1, newdata = fit6$data)) expect_true(is.numeric(loo6_2$estimates)) loo_compare <- loo_compare(loo6_1, loo6_2) expect_range(loo_compare[2, 1], -1, 1) }) test_that("loo_subsample has reasonable outputs", { skip_on_cran() loo2 <- SW(loo_subsample(fit2, observations = 50)) expect_true(is.numeric(loo2$estimates)) expect_equal(nrow(loo2$pointwise), 50) expect_output(print(loo2), "looic") }) test_that("loo_R2 has reasonable outputs", { skip_on_cran() R2 <- SW(loo_R2(fit1)) expect_equal(dim(R2), c(1, 4)) R2 <- SW(loo_R2(fit2, summary = FALSE)) expect_equal(dim(R2), c(ndraws(fit1), 1)) }) test_that("loo_linpred has reasonable outputs", { skip_on_cran() llp <- SW(loo_linpred(fit1)) expect_equal(length(llp), nobs(fit1)) expect_error(loo_linpred(fit4), "Method 'loo_linpred'") llp <- SW(loo_linpred(fit2, scale = "response", type = "var")) expect_equal(length(llp), nobs(fit2)) }) test_that("loo_predict has reasonable outputs", { skip_on_cran() llp <- SW(loo_predict(fit1)) expect_equal(length(llp), nobs(fit1)) newdata <- data.frame( Age = 0, visit = c("a", "b"), Trt = 0, count = 20, patient = 1, Exp = 2, volume = 0 ) llp <- SW(loo_predict( fit1, newdata = newdata, type = "quantile", probs = c(0.25, 0.75), allow_new_levels = TRUE )) expect_equal(dim(llp), c(2, nrow(newdata))) llp <- SW(loo_predict(fit4)) expect_equal(length(llp), nobs(fit4)) }) test_that("loo_predictive_interval has reasonable outputs", { skip_on_cran() llp <- SW(loo_predictive_interval(fit3)) expect_equal(dim(llp), c(nobs(fit3), 2)) }) test_that("loo_model_weights has reasonable outputs", { skip_on_cran() llw <- SW(loo_model_weights(fit1, fit1)) expect_is(llw[1:2], "numeric") expect_equal(names(llw), c("fit1", "fit1")) }) test_that("model.frame has reasonable ouputs", { expect_equal(model.frame(fit1), fit1$data) }) test_that("model_weights has reasonable ouputs", { mw <- model_weights(fit1, fit1, weights = "waic") expect_equal(names(mw), c("fit1", "fit1")) # fails with MKL on CRAN for unknown reasons # expect_equal(mw, setNames(c(0.5, 0.5), c("fit1", "fit1"))) }) test_that("ndraws and friends have reasonable ouputs", { expect_equal(ndraws(fit1), 50) expect_equal(nchains(fit1), 1) expect_equal(niterations(fit1), 50) }) test_that("ngrps has 
reasonable ouputs", { expect_equal(ngrps(fit1), list(visit = 4)) expect_equal(ngrps(fit2), list(patient = 59)) }) test_that("nobs has reasonable ouputs", { expect_equal(nobs(fit1), nrow(epilepsy)) }) test_that("nsamples has reasonable ouputs", { expect_equal(SW(nsamples(fit1)), 50) expect_equal(SW(nsamples(fit1, subset = 10:1)), 10) expect_equal(SW(nsamples(fit1, incl_warmup = TRUE)), 200) }) test_that("pairs has reasonable outputs", { expect_s3_class(SW(pairs(fit1, variable = variables(fit1)[1:3])), "bayesplot_grid") }) test_that("plot has reasonable outputs", { expect_silent(p <- plot(fit1, plot = FALSE)) expect_silent(p <- plot(fit1, variable = "^b", regex = TRUE, plot = FALSE)) expect_silent(p <- plot(fit1, variable = "^sd", regex = TRUE, plot = FALSE)) expect_error(plot(fit1, variable = "123")) }) test_that("post_prob has reasonable ouputs", { # only test error messages for now expect_error(post_prob(fit1, fit2, model_names = "test1"), "Number of model names is not equal to the number of models") }) test_that("posterior_average has reasonable outputs", { pnames <- c("b_Age", "nu") draws <- posterior_average(fit1, fit1, variable = pnames, weights = c(0.3, 0.7)) expect_equal(dim(draws), c(ndraws(fit1), 2)) expect_equal(names(draws), pnames) weights <- rexp(3) draws <- brms:::SW(posterior_average( fit1, fit2, fit3, variable = "nu", weights = rexp(3), missing = 1, ndraws = 10 )) expect_equal(dim(draws), c(10, 1)) expect_equal(names(draws), "nu") }) test_that("posterior_samples has reasonable outputs", { draws <- SW(posterior_samples(fit1)) expect_equal(dim(draws), c(ndraws(fit1), length(variables(fit1)))) expect_equal(names(draws), variables(fit1)) expect_equal(names(SW(posterior_samples(fit1, pars = "^b_"))), c("b_Intercept", "b_sigma_Intercept", "b_Trt1", "b_Age", "b_volume", "b_Trt1:Age", "b_sigma_Trt1")) # test default method draws <- SW(posterior_samples(fit1$fit, "^b_Intercept$")) expect_equal(dim(draws), c(ndraws(fit1), 1)) }) test_that("posterior_summary has reasonable outputs", { draws <- posterior_summary(fit1, variable = "^b_", regex = TRUE) expect_equal(dim(draws), c(7, 4)) }) test_that("posterior_interval has reasonable outputs", { expect_equal(dim(posterior_interval(fit1)), c(length(variables(fit1)), 2)) }) test_that("posterior_predict has reasonable outputs", { expect_equal(dim(posterior_predict(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("posterior_linpred has reasonable outputs", { expect_equal(dim(posterior_linpred(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("pp_average has reasonable outputs", { ppa <- pp_average(fit1, fit1, weights = "waic") expect_equal(dim(ppa), c(nobs(fit1), 4)) ppa <- pp_average(fit1, fit1, weights = c(1, 4)) expect_equal(attr(ppa, "weights"), c(fit1 = 0.2, fit1 = 0.8)) ns <- c(fit1 = ndraws(fit1) / 5, fit1 = 4 * ndraws(fit1) / 5) expect_equal(attr(ppa, "ndraws"), ns) }) test_that("pp_check has reasonable outputs", { expect_ggplot(pp_check(fit1)) expect_ggplot(pp_check(fit1, newdata = fit1$data[1:100, ])) expect_ggplot(pp_check(fit1, "stat", ndraws = 5)) expect_ggplot(pp_check(fit1, "error_binned")) pp <- pp_check(fit1, "ribbon_grouped", group = "visit", x = "Age") expect_ggplot(pp) pp <- pp_check(fit1, type = "violin_grouped", group = "visit", newdata = fit1$data[1:100, ]) expect_ggplot(pp) pp <- SW(pp_check(fit1, type = "loo_pit", cores = 1)) expect_ggplot(pp) expect_ggplot(pp_check(fit3)) expect_ggplot(pp_check(fit2, "ribbon", x = "Age")) expect_error(pp_check(fit2, "ribbon", x = "x"), "Variable 'x' could not be found in the data") 
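# pp_check() delegates the plotting to the ppc_*() functions of 'bayesplot'
# (hence the 'type' names used above); a minimal standalone sketch with
# simulated data -- y_obs and y_rep are made up and unrelated to fit1/fit2:
y_obs <- rnorm(50)
y_rep <- matrix(rnorm(10 * 50), nrow = 10)  # 10 hypothetical predictive draws
p_sketch <- bayesplot::ppc_dens_overlay(y_obs, y_rep)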
expect_error(pp_check(fit1, "wrong_type")) expect_error(pp_check(fit2, "violin_grouped"), "group") expect_error(pp_check(fit1, "stat_grouped", group = "g"), "Variable 'g' could not be found in the data") expect_ggplot(pp_check(fit4)) expect_ggplot(pp_check(fit5)) expect_error(pp_check(fit4, "error_binned"), "Type 'error_binned' is not available") }) test_that("posterior_epred has reasonable outputs", { expect_equal(dim(posterior_epred(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("pp_mixture has reasonable outputs", { expect_equal(dim(pp_mixture(fit5)), c(nobs(fit5), 4, 2)) expect_error(pp_mixture(fit1), "Method 'pp_mixture' can only be applied to mixture models" ) }) test_that("predict has reasonable outputs", { pred <- predict(fit1) expect_equal(dim(pred), c(nobs(fit1), 4)) expect_equal(colnames(pred), c("Estimate", "Est.Error", "Q2.5", "Q97.5")) pred <- predict(fit1, ndraws = 10, probs = c(0.2, 0.5, 0.8)) expect_equal(dim(pred), c(nobs(fit1), 5)) newdata <- data.frame( Age = c(0, -0.2), visit = c(1, 4), Trt = c(1, 0), count = c(2, 10), patient = c(1, 42), Exp = c(1, 2), volume = 0 ) pred <- predict(fit1, newdata = newdata) expect_equal(dim(pred), c(2, 4)) newdata$visit <- c(1, 6) pred <- predict(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(pred), c(2, 4)) # predict NA responses in ARMA models df <- fit1$data[1:10, ] df$count[8:10] <- NA pred <- predict(fit1, newdata = df, ndraws = 1) expect_true(!anyNA(pred[, "Estimate"])) pred <- predict(fit2) expect_equal(dim(pred), c(nobs(fit2), 4)) pred <- predict(fit2, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(pred), c(2, 4)) # check if grouping factors with a single level are accepted newdata$patient <- factor(2) pred <- predict(fit2, newdata = newdata) expect_equal(dim(pred), c(2, 4)) pred <- predict(fit4) expect_equal(dim(pred), c(nobs(fit4), 4)) expect_equal(colnames(pred), paste0("P(Y = ", 1:4, ")")) pred <- predict(fit4, newdata = fit4$data[1, ]) expect_equal(dim(pred), c(1, 4)) pred <- predict(fit5) expect_equal(dim(pred), c(nobs(fit5), 4)) newdata <- fit5$data[1:10, ] newdata$patient <- "a" pred <- predict(fit5, newdata, allow_new_levels = TRUE, sample_new_levels = "old_levels") expect_equal(dim(pred), c(10, 4)) pred <- predict(fit5, newdata, allow_new_levels = TRUE, sample_new_levels = "gaussian") expect_equal(dim(pred), c(10, 4)) }) test_that("predictive_error has reasonable outputs", { expect_equal(dim(predictive_error(fit1)), c(ndraws(fit1), nobs(fit1))) }) test_that("print has reasonable outputs", { expect_output(SW(print(fit1)), "Group-Level Effects:") }) test_that("prior_draws has reasonable outputs", { prs1 <- prior_draws(fit1) prior_names <- c( "Intercept", "b", paste0("simo_moExp1[", 1:4, "]"), "bsp", "bs", "sds_sAge_1", "b_sigma", "Intercept_sigma", "nu", "sd_visit", "cor_visit" ) expect_equal(colnames(prs1), prior_names) prs2 <- prior_draws(fit1, variable = "b_Trt1") expect_equal(dimnames(prs2), list(as.character(1:ndraws(fit1)), "b_Trt1")) expect_equal(sort(prs1$b), sort(prs2$b_Trt)) # test default method prs <- prior_draws(fit1$fit, variable = "^sd_visit", regex = TRUE) expect_equal(names(prs), "prior_sd_visit") }) test_that("prior_summary has reasonable outputs", { expect_true(is(prior_summary(fit1), "brmsprior")) }) test_that("ranef has reasonable outputs", { ranef1 <- SM(ranef(fit1)) expect_equal(dim(ranef1$visit), c(4, 4, 2)) ranef1 <- SM(ranef(fit1, pars = "Trt1")) expect_equal(dimnames(ranef1$visit)[[3]], "Trt1") ranef1 <- SM(ranef(fit1, groups = "a")) 
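# 'a' is not a grouping factor of fit1 (its only grouping factor is 'visit'),
# so the check below expects an empty result rather than an error.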
expect_equal(length(ranef1), 0L) ranef2 <- SM(ranef(fit2, summary = FALSE)) expect_equal(dim(ranef2$patient), c(ndraws(fit2), 59, 2)) }) test_that("residuals has reasonable outputs", { res1 <- SW(residuals(fit1, type = "pearson", probs = c(0.65))) expect_equal(dim(res1), c(nobs(fit1), 3)) newdata <- cbind(epilepsy[1:10, ], Exp = rep(1:5, 2), volume = 0) res2 <- residuals(fit1, newdata = newdata) expect_equal(dim(res2), c(10, 4)) newdata$visit <- rep(1:5, 2) res3 <- residuals(fit1, newdata = newdata, allow_new_levels = TRUE) expect_equal(dim(res3), c(10, 4)) res4 <- residuals(fit2) expect_equal(dim(res4), c(nobs(fit2), 4)) expect_error(residuals(fit4), "Predictive errors are not defined") res6 <- residuals(fit6) expect_equal(dim(res6), c(nobs(fit6), 4, 2)) expect_equal(dimnames(res6)[[3]], c("volume", "count")) }) test_that("stancode has reasonable outputs", { scode <- stancode(fit1) expect_true(is.character(stancode(fit1))) expect_match(stancode(fit1), "generated quantities") expect_identical(scode, fit1$model) # test that stancode can be updated scode <- stancode(fit2, threads = threading(1)) expect_match(scode, "reduce_sum(partial_log_lik_lpmf,", fixed = TRUE) }) test_that("standata has reasonable outputs", { expect_equal(sort(names(standata(fit1))), sort(c("N", "Y", "Kar", "Kma", "J_lag", "K", "X", "Ksp", "Imo", "Xmo_1", "Jmo", "con_simo_1", "Z_1_1", "Z_1_2", "nb_1", "knots_1", "Zs_1_1", "Ks", "Xs", "offsets", "K_sigma", "X_sigma", "J_1", "N_1", "M_1", "NC_1", "prior_only")) ) expect_equal(sort(names(standata(fit2))), sort(c("N", "Y", "weights", "C_1", "K_a", "X_a", "Z_1_a_1", "K_b", "X_b", "Z_1_b_2", "J_1", "N_1", "M_1", "NC_1", "prior_only")) ) }) test_that("mcmc_plot has reasonable outputs", { expect_ggplot(mcmc_plot(fit1)) expect_ggplot(mcmc_plot(fit1, variable = "^b", regex = TRUE)) expect_ggplot(SM(mcmc_plot(fit1, type = "trace", variable = "^b_", regex = TRUE))) expect_ggplot(mcmc_plot(fit1, type = "hist", variable = "^sd_", regex = TRUE)) expect_ggplot(mcmc_plot(fit1, type = "dens")) expect_ggplot(mcmc_plot(fit1, type = "scatter", variable = variables(fit1)[2:3])) expect_ggplot(SW(mcmc_plot(fit1, type = "rhat", variable = "^b_", regex = TRUE))) expect_ggplot(SW(mcmc_plot(fit1, type = "neff"))) expect_ggplot(mcmc_plot(fit1, type = "acf")) expect_silent(p <- mcmc_plot(fit1, type = "nuts_divergence")) expect_error(mcmc_plot(fit1, type = "density"), "Invalid plot type") expect_error(mcmc_plot(fit1, type = "hex"), "Exactly 2 parameters must be selected") }) test_that("summary has reasonable outputs", { summary1 <- SW(summary(fit1, priors = TRUE)) expect_true(is.data.frame(summary1$fixed)) expect_equal(rownames(summary1$fixed), c("Intercept", "sigma_Intercept", "Trt1", "Age", "volume", "Trt1:Age", "sigma_Trt1", "sAge_1", "moExp")) expect_equal(colnames(summary1$fixed), c("Estimate", "Est.Error", "l-95% CI", "u-95% CI", "Rhat", "Bulk_ESS", "Tail_ESS")) expect_equal(rownames(summary1$random$visit), c("sd(Intercept)", "sd(Trt1)", "cor(Intercept,Trt1)")) expect_output(print(summary1), "Population-Level Effects:") expect_output(print(summary1), "Priors:") summary5 <- SW(summary(fit5, robust = TRUE)) expect_output(print(summary5), "sigma1") expect_output(print(summary5), "theta1") summary6 <- SW(summary(fit6)) expect_output(print(summary6), "sdgp") }) test_that("update has reasonable outputs", { # Do not actually refit the model as is causes CRAN checks to fail. # Some tests are commented out as they fail when updating Stan code # of internal example models because of Stan code mismatches. 
Refitting # these example models is slow especially when done repeatedly and # leads the git repo to blow up eventually due the size of the models. up <- update(fit1, testmode = TRUE) expect_true(is(up, "brmsfit")) new_data <- data.frame( Age = rnorm(18), visit = rep(c(3, 2, 4), 6), Trt = rep(0:1, 9), count = rep(c(5, 17, 28), 6), patient = 1, Exp = 4, volume = 0 ) up <- update(fit1, newdata = new_data, save_pars = save_pars(group = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) expect_equal(attr(up$data, "data_name"), "new_data") # expect_equal(attr(up$ranef, "levels")$visit, c("2", "3", "4")) # expect_true("r_1_1" %in% up$exclude) expect_error(update(fit1, data = new_data), "use argument 'newdata'") up <- update(fit1, formula = ~ . + I(exp(Age)), testmode = TRUE, prior = set_prior("normal(0,10)")) expect_true(is(up, "brmsfit")) up <- update(fit1, ~ . - Age + factor(Age), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit1, formula = ~ . + I(exp(Age)), newdata = new_data, sample_prior = FALSE, testmode = TRUE) expect_true(is(up, "brmsfit")) expect_error(update(fit1, formula. = ~ . + wrong_var), "New variables found: 'wrong_var'") up <- update(fit1, save_pars = save_pars(group = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_true("r_1_1" %in% up$exclude) up <- update(fit3, save_pars = save_pars(latent = FALSE), testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_true("Xme_1" %in% up$exclude) up <- update(fit2, algorithm = "fullrank", testmode = TRUE) expect_true(is(up, "brmsfit")) # expect_equal(up$algorithm, "fullrank") up <- update(fit2, formula. = bf(. ~ ., a + b ~ 1, nl = TRUE), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit2, formula. = bf(count ~ a + b, nl = TRUE), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit3, family = acat(), testmode = TRUE) expect_true(is(up, "brmsfit")) up <- update(fit3, bf(~., family = acat()), testmode = TRUE) expect_true(is(up, "brmsfit")) }) test_that("VarCorr has reasonable outputs", { vc <- VarCorr(fit1) expect_equal(names(vc), c("visit")) Names <- c("Intercept", "Trt1") expect_equal(dimnames(vc$visit$cov)[c(1, 3)], list(Names, Names)) vc <- VarCorr(fit2) expect_equal(names(vc), c("patient")) expect_equal(dim(vc$patient$cor), c(2, 4, 2)) vc <- VarCorr(fit2, summary = FALSE) expect_equal(dim(vc$patient$cor), c(ndraws(fit2), 2, 2)) expect_equal(dim(VarCorr(fit6)$residual__$sd), c(1, 4)) vc <- VarCorr(fit5) expect_equal(dim(vc$patient$sd), c(2, 4)) }) test_that("variables has reasonable ouputs", { expect_true(all( c("b_Intercept", "bsp_moExp", "ar[1]", "cor_visit__Intercept__Trt1", "nu", "simo_moExp1[2]", "r_visit[4,Trt1]", "s_sAge_1[8]", "prior_sd_visit", "prior_cor_visit", "lp__") %in% variables(fit1) )) expect_true(all( c("b_a_Intercept", "b_b_Age", "sd_patient__b_Intercept", "cor_patient__a_Intercept__b_Intercept", "r_patient__a[1,Intercept]", "r_patient__b[4,Intercept]", "prior_b_a") %in% variables(fit2) )) expect_true(all( c("lscale_volume_gpAgeTrt0", "lscale_volume_gpAgeTrt1") %in% variables(fit6) )) expect_equal(variables(fit3), SW(parnames(fit3))) }) test_that("vcov has reasonable outputs", { expect_equal(dim(vcov(fit1)), c(9, 9)) expect_equal(dim(vcov(fit1, cor = TRUE)), c(9, 9)) }) test_that("waic has reasonable outputs", { waic1 <- SW(WAIC(fit1)) expect_true(is.numeric(waic1$estimates)) # fails on MKL for unknown reasons # expect_equal(waic1, SW(waic(fit1))) fit1 <- SW(add_criterion(fit1, "waic")) expect_true(is.numeric(fit1$criteria$waic$estimates)) # fails on 
MKL for unknown reasons # expect_equal(waic(fit1), fit1$criteria$waic) waic_compare <- SW(waic(fit1, fit1)) expect_equal(length(waic_compare$loos), 2) expect_equal(dim(waic_compare$ic_diffs__), c(1, 2)) waic2 <- SW(waic(fit2)) expect_true(is.numeric(waic2$estimates)) waic_pointwise <- SW(waic(fit2, pointwise = TRUE)) expect_equal(waic2, waic_pointwise) expect_warning(compare_ic(waic1, waic2), "Model comparisons are likely invalid") waic4 <- SW(waic(fit4)) expect_true(is.numeric(waic4$estimates)) }) test_that("diagnostic convenience functions have reasonable outputs", { expect_true(is(log_posterior(fit1), "data.frame")) expect_true(is(nuts_params(fit1), "data.frame")) expect_true(is(rhat(fit1), "numeric")) expect_true(is(neff_ratio(fit1), "numeric")) }) test_that("contrasts of grouping factors are not stored #214", { expect_true(is.null(attr(fit1$data$patient, "contrasts"))) }) brms/tests/testthat/tests.priors.R0000644000175000017500000001274214111751670017164 0ustar nileshnilesh# most tests of prior related stuff can be found in tests.make_stancode.R context("Tests for prior generating functions") test_that("get_prior finds all classes for which priors can be specified", { expect_equal( sort( get_prior( count ~ zBase * Trt + (1|patient) + (1+Trt|visit), data = epilepsy, family = "poisson" )$class ), sort(c(rep("b", 4), c("cor", "cor"), "Intercept", rep("sd", 6))) ) expect_equal( sort( get_prior( rating ~ treat + period + cse(carry), data = inhaler, family = sratio(threshold = "equidistant") )$class ), sort(c(rep("b", 4), "delta", rep("Intercept", 1))) ) }) test_that("set_prior allows arguments to be vectors", { bprior <- set_prior("normal(0, 2)", class = c("b", "sd")) expect_is(bprior, "brmsprior") expect_equal(bprior$prior, rep("normal(0, 2)", 2)) expect_equal(bprior$class, c("b", "sd")) }) test_that("print for class brmsprior works correctly", { expect_output(print(set_prior("normal(0,1)")), fixed = TRUE, "b ~ normal(0,1)") expect_output(print(set_prior("normal(0,1)", coef = "x")), "b_x ~ normal(0,1)", fixed = TRUE) expect_output(print(set_prior("cauchy(0,1)", class = "sd", group = "x")), "sd_x ~ cauchy(0,1)", fixed = TRUE) expect_output(print(set_prior("target += normal_lpdf(x | 0,1))", check = FALSE)), "target += normal_lpdf(x | 0,1))", fixed = TRUE) }) test_that("get_prior returns correct nlpar names for random effects pars", { # reported in issue #47 data <- data.frame(y = rnorm(10), x = rnorm(10), g = rep(1:2, 5)) gp <- get_prior(bf(y ~ a - b^x, a + b ~ (1+x|g), nl = TRUE), data = data) expect_equal(sort(unique(gp$nlpar)), c("", "a", "b")) }) test_that("get_prior returns correct fixed effect names for GAMMs", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(y ~ z + s(x) + (1|g), data = dat) expect_equal(prior[prior$class == "b", ]$coef, c("", "sx_1", "z")) prior <- get_prior(bf(y ~ lp, lp ~ z + s(x) + (1|g), nl = TRUE), data = dat) expect_equal(prior[prior$class == "b", ]$coef, c("", "Intercept", "sx_1", "z")) }) test_that("get_prior returns correct prior names for auxiliary parameters", { dat <- data.frame(y = rnorm(10), x = rnorm(10), z = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(bf(y ~ 1, phi ~ z + (1|g)), data = dat, family = Beta()) prior <- prior[prior$dpar == "phi", ] pdata <- data.frame(class = c("b", "b", "Intercept", rep("sd", 3)), coef = c("", "z", "", "", "", "Intercept"), group = c(rep("", 4), "g", "g"), stringsAsFactors = FALSE) pdata <- pdata[with(pdata, order(class, group, coef)), ] expect_equivalent(prior[, 
c("class", "coef", "group")], pdata) }) test_that("get_prior returns correct priors for multivariate models", { dat <- data.frame(y1 = rnorm(10), y2 = c(1, rep(1:3, 3)), x = rnorm(10), g = rep(1:2, 5)) bform <- bf(mvbind(y1, y2) ~ x + (x|ID1|g)) + set_rescor(TRUE) # check global priors prior <- get_prior(bform, dat, family = gaussian()) expect_equal(prior[prior$resp == "y1" & prior$class == "b", "coef"], c("", "x")) expect_equal(prior[prior$class == "rescor", "prior"], "lkj(1)") # check family and autocor specific priors family <- list(gaussian, Beta()) bform <- bf(y1 ~ x + (x|ID1|g) + ar()) + bf(y2 ~ 1) prior <- get_prior(bform, dat, family = family) expect_true(any(with(prior, class == "sigma" & resp == "y1"))) expect_true(any(with(prior, class == "ar" & resp == "y1"))) expect_true(any(with(prior, class == "phi" & resp == "y2"))) expect_true(!any(with(prior, class == "ar" & resp == "y2"))) }) test_that("get_prior returns correct priors for categorical models", { # check global priors dat <- data.frame(y2 = c(1, rep(1:3, 3)), x = rnorm(10), g = rep(1:2, 5)) prior <- get_prior(y2 ~ x + (x|ID1|g), data = dat, family = categorical()) expect_equal(prior[prior$dpar == "mu2" & prior$class == "b", "coef"], c("", "x")) }) test_that("set_prior alias functions produce equivalent results", { expect_equal(set_prior("normal(0, 1)", class = "sd"), prior(normal(0, 1), class = sd)) expect_equal(set_prior("normal(0, 1)", class = "sd", nlpar = "a"), prior(normal(0, 1), class = "sd", nlpar = a)) expect_equal(set_prior("normal(0, 1)", class = "sd", nlpar = "a"), prior_(~normal(0, 1), class = ~sd, nlpar = quote(a))) expect_equal(set_prior("normal(0, 1)", class = "sd"), prior_string("normal(0, 1)", class = "sd")) }) test_that("external interface of validate_prior works correctly", { prior1 <- prior(normal(0,10), class = b) + prior(cauchy(0,2), class = sd) prior1 <- validate_prior( prior1, count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson() ) expect_true(all(c("b", "Intercept", "sd") %in% prior1$class)) expect_equal(nrow(prior1), 9) }) test_that("overall intercept priors are adjusted for the intercept", { dat <- data.frame(y = rep(c(1, 3), each = 5), off = 10) prior1 <- get_prior(y ~ 1 + offset(off), dat) int_prior <- prior1$prior[prior1$class == "Intercept"] expect_equal(int_prior, "student_t(3, -8, 2.5)") }) brms/tests/testthat/tests.log_lik.R0000644000175000017500000003747614111751667017307 0ustar nileshnileshcontext("Tests for log_lik helper functions") test_that("log_lik for location shift models works as expected", { ns <- 25 prep <- structure(list(), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * 2), ncol = 2), sigma = rchisq(ns, 3), nu = rgamma(ns, 4) ) prep$family <- gaussian() prep$family$fun <- "gaussian" prep$data <- list(Y = rnorm(ns)) ll_gaussian <- dnorm( x = prep$data$Y[1], mean = prep$dpars$mu[, 1], sd = prep$dpars$sigma, log = TRUE ) ll <- brms:::log_lik_gaussian(1, prep = prep) expect_equal(ll, ll_gaussian) ll_student <- dstudent_t( x = prep$data$Y[2], df = prep$dpars$nu, mu = prep$dpars$mu[, 2], sigma = prep$dpars$sigma, log = TRUE ) ll <- brms:::log_lik_student(2, prep = prep) expect_equal(ll, ll_student) # also test weighting prep$data$weights <- sample(1:10, ns, replace = TRUE) ll <- brms:::log_lik_gaussian(1, prep = prep) expect_equal(ll, ll_gaussian * prep$data$weights[1]) }) test_that("log_lik for various skewed normal models works as expected", { ns <- 50 prep <- structure(list(), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), 
beta = rchisq(ns, 3), mu = matrix(rnorm(ns*2), ncol = 2), alpha = rnorm(ns), ndt = 1 ) prep$data <- list(Y = rlnorm(ns)) ll_lognormal <- dlnorm( x = prep$data$Y[1], mean = prep$dpars$mu[, 1], sd = prep$dpars$sigma, log = TRUE ) ll <- brms:::log_lik_lognormal(1, prep = prep) expect_equal(ll, ll_lognormal) ll_shifted_lognormal <- dshifted_lnorm( x = prep$data$Y[1], mean = prep$dpars$mu[, 1], sd = prep$dpars$sigma, shift = prep$dpars$ndt, log = TRUE ) ll <- brms:::log_lik_shifted_lognormal(1, prep = prep) expect_equal(ll, ll_shifted_lognormal) ll_exgaussian <- dexgaussian( x = prep$data$Y[1], mu = prep$dpars$mu[, 1], sigma = prep$dpars$sigma, beta = prep$dpars$beta, log = TRUE ) ll <- brms:::log_lik_exgaussian(1, prep = prep) expect_equal(ll, ll_exgaussian) ll_skew_normal <- dskew_normal( x = prep$data$Y[1], mu = prep$dpars$mu[, 1], sigma = prep$dpars$sigma, alpha = prep$dpars$alpha, log = TRUE ) ll <- as.numeric(brms:::log_lik_skew_normal(1, prep = prep)) expect_equal(ll, ll_skew_normal) }) test_that("log_lik of aysm_laplace models runs without errors", { ns <- 50 prep <- structure(list(), class = "brmsprep") prep$dpars <- list( sigma = rchisq(ns, 3), quantile = rbeta(ns, 2, 1), mu = matrix(rnorm(ns*2), ncol = 2), zi = rbeta(ns, 10, 10) ) prep$data <- list(Y = brms:::rasym_laplace(ns)) ll <- brms:::log_lik_asym_laplace(1, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_zero_inflated_asym_laplace(1, prep = prep) expect_equal(length(ll), ns) }) test_that("log_lik for multivariate linear models runs without errors", { ns <- 10 nvars <- 3 ncols <- 4 nobs <- nvars * ncols prep <- structure(list(), class = "mvbrmsprep") Sigma = array(cov(matrix(rnorm(300), ncol = 3)), dim = c(3, 3, 10)) prep$mvpars <- list( Mu = array(rnorm(ns*nobs*nvars), dim = c(ns, nobs, nvars)), Sigma = aperm(Sigma, c(3, 1, 2)) ) prep$dpars <- list(nu = rgamma(ns, 5)) prep$ndraws <- ns prep$data <- list(Y = matrix(rnorm(nobs), ncol = nvars)) ll <- brms:::log_lik_gaussian_mv(1, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_student_mv(2, prep = prep) expect_equal(length(ll), ns) }) test_that("log_lik for ARMA models runs without errors", { ns <- 20 nobs <- 15 prep <- structure(list(ndraws = ns), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns*nobs), ncol = nobs), sigma = rchisq(ns, 3), nu = rgamma(ns, 5) + 15 ) prep$ac <- list( ar = matrix(rbeta(ns, 0.5, 0.5), ncol = 1), ma = matrix(rbeta(ns, 0.2, 1), ncol = 1), begin_tg = 2, end_tg = 5 ) prep$data <- list(Y = rnorm(nobs), se = rgamma(ns, 10)) prep$family$fun <- "gaussian_time" ll <- brms:::log_lik_gaussian_time(1, prep = prep) expect_equal(dim(ll), c(ns, 4)) prep$family$fun <- "student_time" ll <- brms:::log_lik_student_time(1, prep = prep) expect_equal(dim(ll), c(ns, 4)) }) test_that("log_lik for SAR models runs without errors", { prep <- structure(list(ndraws = 3, nobs = 10), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(30), nrow = 3), nu = rep(10, 3), sigma = rep(10, 3) ) prep$ac <- list( lagsar = matrix(c(0.3, 0.5, 0.7)), Msar = diag(10) ) prep$data <- list(Y = rnorm(10)) ll <- brms:::log_lik_gaussian_lagsar(1, prep = prep) expect_equal(dim(ll), c(3, 10)) ll <- brms:::log_lik_student_lagsar(1, prep = prep) expect_equal(dim(ll), c(3, 10)) prep$ac$errorsar <- prep$ac$lagsar prep$ac$lagsar <- NULL ll <- brms:::log_lik_gaussian_errorsar(1, prep = prep) expect_equal(dim(ll), c(3, 10)) ll <- brms:::log_lik_student_errorsar(1, prep = prep) expect_equal(dim(ll), c(3, 10)) }) test_that("log_lik for FCOR models runs without 
errors", { ns <- 3 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(nobs * ns), nrow = ns), sigma = rep(1, ns), nu = rep(10, ns) ) prep$ac <- list(Mfcor = diag(nobs)) prep$data$Y <- rnorm(nobs) ll <- brms:::log_lik_gaussian_fcor(1, prep = prep) expect_equal(dim(ll), c(ns, nobs)) ll <- brms:::log_lik_student_fcor(1, prep = prep) expect_equal(dim(ll), c(ns, nobs)) }) test_that("log_lik for count and survival models works correctly", { ns <- 25 nobs <- 10 trials <- sample(10:30, nobs, replace = TRUE) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns*nobs), ncol = nobs), shape = rgamma(ns, 4), xi = runif(ns, -1, 0.5) ) prep$dpars$sigma <- 1 / prep$dpars$shape prep$dpars$nu <- prep$dpars$shape + 1 prep$data <- list( Y = rbinom(nobs, size = trials, prob = rbeta(nobs, 1, 1)), trials = trials ) i <- sample(nobs, 1) prep$dpars$mu <- brms:::inv_logit(prep$dpars$eta) ll_binom <- dbinom( x = prep$data$Y[i], prob = prep$dpars$mu[, i], size = prep$data$trials[i], log = TRUE ) ll <- brms:::log_lik_binomial(i, prep = prep) expect_equal(ll, ll_binom) # don't test the actual values as they will be -Inf for this data ll <- brms:::log_lik_discrete_weibull(i, prep = prep) expect_equal(length(ll), ns) prep$dpars$mu <- exp(prep$dpars$eta) ll_pois <- dpois( x = prep$data$Y[i], lambda = prep$dpars$mu[, i], log = TRUE ) ll <- brms:::log_lik_poisson(i, prep = prep) expect_equal(ll, ll_pois) ll_nbinom <- dnbinom( x = prep$data$Y[i], mu = prep$dpars$mu[, i], size = prep$dpars$shape, log = TRUE ) ll <- brms:::log_lik_negbinomial(i, prep = prep) expect_equal(ll, ll_nbinom) ll <- brms:::log_lik_negbinomial2(i, prep = prep) expect_equal(ll, ll_nbinom) ll_geo <- dnbinom( x = prep$data$Y[i], mu = prep$dpars$mu[, i], size = 1, log = TRUE ) ll <- brms:::log_lik_geometric(i, prep = prep) expect_equal(ll, ll_geo) ll_com_pois <- brms:::dcom_poisson( x = prep$data$Y[i], mu = prep$dpars$mu[, i], shape = prep$dpars$shape, log = TRUE ) ll <- brms:::log_lik_com_poisson(i, prep = prep) expect_equal(ll, ll_com_pois) ll_exp <- dexp( x = prep$data$Y[i], rate = 1 / prep$dpars$mu[, i], log = TRUE ) ll <- brms:::log_lik_exponential(i, prep = prep) expect_equal(ll, ll_exp) ll_gamma <- dgamma( x = prep$data$Y[i], shape = prep$dpars$shape, scale = prep$dpars$mu[, i] / prep$dpars$shape, log = TRUE ) ll <- brms:::log_lik_gamma(i, prep = prep) expect_equal(ll, ll_gamma) scale <- prep$dpars$mu[, i] / gamma(1 - 1 / prep$dpars$nu) ll_frechet <- dfrechet( x = prep$data$Y[i], shape = prep$dpars$nu, scale = scale, log = TRUE ) ll <- brms:::log_lik_frechet(i, prep = prep) expect_equal(ll, ll_frechet) ll_invgauss <- dinv_gaussian( x = prep$data$Y[i], shape = prep$dpars$shape, mu = prep$dpars$mu[, i], log = TRUE ) ll <- brms:::log_lik_inverse.gaussian(i, prep = prep) expect_equal(ll, ll_invgauss) ll_weibull <- dweibull( x = prep$data$Y[i], shape = prep$dpars$shape, scale = prep$dpars$mu[, i] / gamma(1 + 1 / prep$dpars$shape), log = TRUE ) ll <- brms:::log_lik_weibull(i, prep = prep) expect_equal(ll, c(ll_weibull)) # keep test at the end prep$family$link <- "identity" prep$data$Y[i] <- 0 ll_gen_extreme_value <- SW(dgen_extreme_value( x = prep$data$Y[i], mu = prep$dpars$mu[, i], sigma = prep$dpars$sigma, xi = prep$dpars$xi, log = TRUE )) ll <- SW(brms:::log_lik_gen_extreme_value(i, prep = prep)) expect_equal(ll, ll_gen_extreme_value) }) test_that("log_lik for bernoulli and beta models works correctly", { ns <- 15 nobs <- 10 
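# As in the other tests of this file, 'prep' emulates the object returned by
# prepare_predictions(): a list of class 'brmsprep' whose $dpars entries hold
# distributional parameters as ndraws-by-nobs matrices (or plain vectors when
# constant across observations) and whose $data contains the response Y, so
# the internal log_lik_*() helpers can be evaluated without a fitted model.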
prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = brms:::inv_logit(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)), phi = rgamma(ns, 4) ) prep$data <- list(Y = sample(0:1, nobs, replace = TRUE)) i <- sample(1:nobs, 1) ll_bern <- dbinom( x = prep$data$Y[i], prob = prep$dpars$mu[, i], size = 1, log = TRUE ) ll <- brms:::log_lik_bernoulli(i, prep = prep) expect_equal(ll, ll_bern) prep$data <- list(Y = rbeta(nobs, 1, 1)) ll_beta <- dbeta( x = prep$data$Y[i], shape1 = prep$dpars$mu[, i] * prep$dpars$phi, shape2 = (1 - prep$dpars$mu[, i]) * prep$dpars$phi, log = TRUE ) ll <- brms:::log_lik_beta(i, prep = prep) expect_equal(ll, ll_beta) }) test_that("log_lik for circular models runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = 2 * atan(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)), kappa = rgamma(ns, 4) ) prep$data <- list(Y = runif(nobs, -pi, pi)) i <- sample(seq_len(nobs), 1) ll <- brms:::log_lik_von_mises(i, prep = prep) expect_equal(length(ll), ns) prep$data$cens <- sample(-1:1, nobs, TRUE) ll <- brms:::log_lik_von_mises(i, prep = prep) expect_equal(length(ll), ns) }) test_that("log_lik for zero-inflated and hurdle models runs without erros", { ns <- 50 nobs <- 8 trials <- sample(10:30, nobs, replace = TRUE) resp <- rbinom(nobs, size = trials, prob = rbeta(nobs, 1, 1)) prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( eta = matrix(rnorm(ns*nobs), ncol = nobs), shape = rgamma(ns, 4), phi = rgamma(ns, 1), zi = rbeta(ns, 1, 1), coi = rbeta(ns, 5, 7) ) prep$dpars$hu <- prep$dpars$zoi <- prep$dpars$zi prep$data <- list(Y = c(resp, rep(0, 4)), trials = trials) prep$dpars$mu <- exp(prep$dpars$eta) ll <- brms:::log_lik_hurdle_poisson(1, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_hurdle_negbinomial(5, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_hurdle_gamma(2, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_hurdle_gamma(8, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_zero_inflated_poisson(3, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_zero_inflated_negbinomial(6, prep = prep) expect_equal(length(ll), ns) prep$dpars$mu <- brms:::inv_logit(prep$dpars$eta) ll <- brms:::log_lik_zero_inflated_binomial(4, prep = prep) expect_equal(length(ll), ns) prep$data$Y[1:nobs] <- rbeta(nobs / 2, 0.5, 4) ll <- brms:::log_lik_zero_inflated_beta(6, prep = prep) expect_equal(length(ll), ns) ll <- brms:::log_lik_zero_one_inflated_beta(7, prep = prep) expect_equal(length(ll), ns) }) test_that("log_lik for ordinal models runs without erros", { ns <- 50 nobs <- 8 nthres <- 3 ncat <- nthres + 1 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = array(rnorm(ns * nobs), dim = c(ns, nobs)), disc = rexp(ns) ) prep$thres$thres <- array(0, dim = c(ns, nthres)) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family$link <- "logit" ll <- sapply(1:nobs, brms:::log_lik_cumulative, prep = prep) expect_equal(dim(ll), c(ns, nobs)) ll <- sapply(1:nobs, brms:::log_lik_sratio, prep = prep) expect_equal(dim(ll), c(ns, nobs)) ll <- sapply(1:nobs, brms:::log_lik_cratio, prep = prep) expect_equal(dim(ll), c(ns, nobs)) ll <- sapply(1:nobs, brms:::log_lik_acat, prep = prep) expect_equal(dim(ll), c(ns, nobs)) prep$family$link <- "probit" ll <- sapply(1:nobs, brms:::log_lik_acat, prep = prep) expect_equal(dim(ll), c(ns, nobs)) }) 
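# A standalone sketch (made-up thresholds and linear predictor) of the
# category probabilities behind log_lik_cumulative(), one of the ordinal
# helpers exercised above: P(Y = k) = F(disc * (tau_k - mu)) - F(disc *
# (tau_{k-1} - mu)) with tau_0 = -Inf, tau_K = Inf, and F the inverse link
# (plogis for logit, pnorm for probit).
cumulative_prob <- function(k, mu, tau, disc = 1, pfun = plogis) {
  tau_ext <- c(-Inf, tau, Inf)
  pfun(disc * (tau_ext[k + 1] - mu)) - pfun(disc * (tau_ext[k] - mu))
}
probs <- sapply(1:4, cumulative_prob, mu = 0.3, tau = c(-1, 0, 1))
stopifnot(abs(sum(probs) - 1) < 1e-8)  # the 4 category probabilities sum to 1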
test_that("log_lik for categorical and related models runs without erros", { ns <- 50 nobs <- 8 ncat <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu1 = array(rnorm(ns*nobs), dim = c(ns, nobs)), mu2 = array(rnorm(ns*nobs), dim = c(ns, nobs)) ) prep$data <- list(Y = rep(1:ncat, 2), ncat = ncat) prep$family <- categorical() ll <- sapply(1:nobs, brms:::log_lik_categorical, prep = prep) expect_equal(dim(ll), c(ns, nobs)) prep$data$Y <- matrix( sample(1:20, nobs * ncat, TRUE), nrow = nobs, ncol = ncat ) prep$data$trials <- sample(1:20, nobs) prep$family <- multinomial() ll <- sapply(1:nobs, brms:::log_lik_multinomial, prep = prep) expect_equal(dim(ll), c(ns, nobs)) prep$data$Y <- prep$data$Y / rowSums(prep$data$Y) prep$dpars$phi <- rexp(ns, 10) prep$family <- dirichlet() ll <- sapply(1:nobs, brms:::log_lik_dirichlet, prep = prep) expect_equal(dim(ll), c(ns, nobs)) prep$family <- brmsfamily("dirichlet2") prep$dpars$mu1 <- rexp(ns, 10) prep$dpars$mu2 <- rexp(ns, 10) prep$dpars$mu3 <- rexp(ns, 10) ll <- sapply(1:nobs, brms:::log_lik_dirichlet2, prep = prep) expect_equal(dim(ll), c(ns, nobs)) }) test_that("censored and truncated log_lik run without errors", { ns <- 30 nobs <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), sigma = rchisq(ns, 3) ) prep$data <- list(Y = rnorm(ns), cens = c(-1,0,1)) ll <- sapply(1:nobs, brms:::log_lik_gaussian, prep = prep) expect_equal(dim(ll), c(ns, nobs)) prep$data <- list(Y = sample(-3:3, nobs), lb = -4, ub = 5) ll <- sapply(1:nobs, brms:::log_lik_gaussian, prep = prep) expect_equal(dim(ll), c(ns, nobs)) }) test_that("log_lik for the wiener diffusion model runs without errors", { ns <- 5 nobs <- 3 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rnorm(ns * nobs), ncol = nobs), bs = rchisq(ns, 3), ndt = rep(0.5, ns), bias = rbeta(ns, 1, 1) ) prep$data <- list(Y = abs(rnorm(ns)) + 0.5, dec = c(1, 0, 1)) i <- sample(1:nobs, 1) expect_equal(length(brms:::log_lik_wiener(i, prep)), ns) }) test_that("log_lik_custom runs without errors", { ns <- 15 nobs <- 10 prep <- structure(list(ndraws = ns, nobs = nobs), class = "brmsprep") prep$dpars <- list( mu = matrix(rbeta(ns * nobs * 2, 1, 1), ncol = nobs * 2) ) prep$data <- list( Y = sample(0:1, nobs, replace = TRUE), trials = rep(1, nobs) ) prep$family <- custom_family( "beta_binomial2", dpars = c("mu", "tau"), links = c("logit", "log"), lb = c(NA, 0), type = "int", vars = "trials[n]" ) log_lik_beta_binomial2 <- function(i, prep) { mu <- prep$dpars$mu[, i] dbinom(prep$data$Y[i], size = prep$data$trials[i], prob = mu) } expect_equal(length(brms:::log_lik_custom(sample(1:nobs, 1), prep)), ns) }) brms/tests/testthat/tests.misc.R0000644000175000017500000000602613737534440016606 0ustar nileshnileshcontext("Tests for miscellaneous functions") test_that("p performs correct indexing", { expect_equal(p(1:10), 1:10) x <- rnorm(10) expect_equal(p(x, i = 3), x[3]) A <- matrix(x, nrow = 5) expect_equal(p(A, i = 3), A[3, , drop = FALSE]) expect_equal(p(A, i = 2, row = FALSE), A[, 2, drop = FALSE]) }) test_that("rmNULL removes all NULL entries", { expect_equal(rmNULL(list(a = NULL, b = 1, c = list(NULL, 1))), list(b = 1, c = list(1))) expect_equal(rmNULL(list(a = NULL, b = 1, c = NULL)), list(b = 1)) }) test_that("rename returns an error on duplicated names", { expect_error(rename(c(letters[1:4],"a()","a["), check_dup = TRUE), fixed = TRUE, 
paste("Internal renaming led to duplicated names.", "\nOccured for: 'a', 'a()', 'a['")) expect_error(rename(c("aDb","a/b","b"), check_dup = TRUE), fixed = TRUE, paste("Internal renaming led to duplicated names.", "\nOccured for: 'aDb', 'a/b'")) expect_error(rename(c("log(a,b)","logab","bac","ba"), check_dup = TRUE), fixed = TRUE, paste("Internal renaming led to duplicated names.", "\nOccured for: 'log(a,b)', 'logab'")) }) test_that("rename perform correct renaming", { names <- c("acd", "a[23]", "b__") expect_equal( rename(names, c("[", "]", "__"), c(".", ".", ":")), c("acd", "a.23.", "b:") ) expect_equal( rename(names, c("^\\[", "\\]", "__$"), c(".", ".", ":"), fixed = FALSE), c("acd", "a[23.", "b:") ) }) test_that("collapse_lists performs correct collapsing after names", { x <- list(a = "a <- ", b = "b <- ") y <- list(b = "cauchy(1,2)", c = "normal(0,1)", a = "gamma(1,1)") expect_equal(collapse_lists(list()), list()) expect_equal(collapse_lists(x, y), list(a = "a <- gamma(1,1)", b = "b <- cauchy(1,2)", c = "normal(0,1)")) expect_equal(collapse_lists(ls = list(c(x, c = "c <- "), y)), list(a = "a <- gamma(1,1)", b = "b <- cauchy(1,2)", c = "c <- normal(0,1)")) }) test_that("nlist works correctly", { x <- 1 y <- 2:3 exlist <- list(x = x, y = y) expect_equal(nlist(x = x, y = y), exlist) expect_equal(nlist(x, y), exlist) expect_equal(nlist(x = x, y), exlist) }) test_that("use_alias works correctly", { a <- 2 b <- 3 expect_warning(use_alias(a, b), fixed = TRUE, "'b' is deprecated. Please use argument 'a' instead.") dots <- list(c = 1) expect_warning(use_alias(a, dots$c), fixed = TRUE, "'c' is deprecated. Please use argument 'a' instead.") expect_equal(use_alias(a, dots$c, warn = FALSE), dots$c) }) test_that("rhs keeps attributes", { form <- structure(y~x, test = TRUE) expect_equal(attributes(form), attributes(rhs(form))) }) test_that("lsp works correctly", { expect_equal( lsp("base", pattern = "^log"), c("log", "log10", "log1p", "log2", "logb", "logical") ) expect_equal( lsp("brms", pattern = "^inv_logit"), c("inv_logit", "inv_logit_scaled") ) }) brms/tests/testthat/tests.brmsfit-helpers.R0000644000175000017500000001702114111751667020755 0ustar nileshnileshcontext("Tests for brmsfit helper functions") test_that("first_greater returns expected results", { A <- cbind(1:10, 11:20, 21:30) x <- c(5, 25, 7, 15, 7, 10, 15, 19, 3, 11) expect_equal(first_greater(A, x), c(2, 3, 2, 3, 2, 2, 2, 3, 1, 2)) expect_equal(first_greater(A, x, i = 2), c(2, 3, 2, 3, 2, 2, 2, 3, 2, 2)) }) test_that("array2list performs correct conversion", { A <- array(1:27, dim = c(3,3,3)) B <- list(matrix(1:9,3,3), matrix(10:18,3,3), matrix(19:27,3,3)) expect_equal(brms:::array2list(A), B) }) test_that("probit and probit_approx produce similar results", { expect_equal(brms:::ilink(-10:10, "probit"), brms:::ilink(-10:10, "probit_approx"), tolerance = 1e-3) }) test_that("autocorrelation matrices are computed correctly", { ar <- 0.5 ma <- 0.3 ar_mat <- brms:::get_cor_matrix_ar1(ar = matrix(ar), nobs = 4) expected_ar_mat <- 1 / (1 - ar^2) * cbind(c(1, ar, ar^2, ar^3), c(ar, 1, ar, ar^2), c(ar^2, ar, 1, ar), c(ar^3, ar^2, ar, 1)) expect_equal(ar_mat[1, , ], expected_ar_mat) ma_mat <- brms:::get_cor_matrix_ma1(ma = matrix(ma), nobs = 4) expected_ma_mat <- cbind(c(1+ma^2, ma, 0, 0), c(ma, 1+ma^2, ma, 0), c(0, ma, 1+ma^2, ma), c(0, 0, ma, 1+ma^2)) expect_equal(ma_mat[1, , ], expected_ma_mat) arma_mat <- brms:::get_cor_matrix_arma1( ar = matrix(ar), ma = matrix(ma), nobs = 4 ) g0 <- 1 + ma^2 + 2 * ar * ma g1 <- (1 + ar * ma) * (ar + ma) 
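# g0 and g1 are the lag-0 and lag-1 autocovariance terms of an ARMA(1,1)
# process (up to the common factor 1 / (1 - ar^2) applied just below); for
# lags k >= 2 the autocovariance decays geometrically, gamma_k = ar *
# gamma_(k-1), which is why the off-diagonal entries further out are g1 * ar
# and g1 * ar^2.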
expected_arma_mat <- 1 / (1 - ar^2) * cbind(c(g0, g1, g1 * ar, g1 * ar^2), c(g1, g0, g1, g1 * ar), c(g1 * ar, g1, g0, g1), c(g1 * ar^2, g1 * ar, g1, g0)) expect_equal(arma_mat[1, , ], expected_arma_mat) cosy <- 0.6 cosy_mat <- brms:::get_cor_matrix_cosy(cosy = as.matrix(cosy), nobs = 4) expected_cosy_mat <- matrix(cosy, 4, 4) diag(expected_cosy_mat) <- 1 expect_equal(cosy_mat[1, , ], expected_cosy_mat) ident_mat <- brms:::get_cor_matrix_ident(ndraws = 10, nobs = 4) expected_ident_mat <- diag(1, 4) expect_equal(ident_mat[1, , ], expected_ident_mat) }) test_that("evidence_ratio returns expected results", { ps <- -4:10 prs <- -2:12 expect_true(evidence_ratio(ps, prior_samples = prs) > 1) expect_true(is.na(evidence_ratio(ps))) expect_equal(evidence_ratio(ps, cut = 0.5, wsign = "greater"), 10/5) expect_equal(evidence_ratio(ps, cut = 0.5, wsign = "less"), 5/10) }) test_that("find_vars finds all valid variable names in a string", { string <- "x + b.x - .5 + abc(a__3) : 1/2 - 0.2" expect_equal(find_vars(string), c("x", "b.x", "a__3")) }) test_that(".predictor_arma runs without errors", { ns <- 20 nobs <- 30 Y = rnorm(nobs) J_lag = c(1:3, 3, 3, rep(c(0:3, 3), 4), 0:3, 0) ar <- matrix(rnorm(ns * 3), nrow = ns, ncol = 3) ma <- matrix(rnorm(ns * 1), nrow = ns, ncol = 1) eta <- matrix(rnorm(ns * nobs), nrow = ns, ncol = nobs) expect_equal(.predictor_arma(eta, Y = Y, J_lag = J_lag), eta) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ar = ar)) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ma = ma)) expect_silent(.predictor_arma(eta, Y = Y, J_lag = J_lag, ar = ar, ma = ma)) }) test_that("make_conditions works correctly", { conds <- make_conditions(epilepsy, c("zBase", "zAge")) expect_equal(dim(conds), c(9, 3)) expect_equal(conds$cond__[3], "zBase = -1 & zAge = 1") }) test_that("brmsfit_needs_refit works correctly", { cache_tmp <- tempfile(fileext = ".rds") expect_null(read_brmsfit(cache_tmp)) saveRDS(list(a = 1), file = cache_tmp) expect_error(read_brmsfit(cache_tmp)) data_model1 <- data.frame(y = rnorm(10), x = rnorm(10)) fake_fit <- brm(y ~ x, data = data_model1, empty = TRUE) fake_fit_file <- fake_fit fake_fit_file$file <- cache_tmp scode_model1 <- make_stancode(y ~ x, data = data_model1) sdata_model1 <- make_standata(y ~ x, data = data_model1) data_model2 <- data_model1 data_model2$x[1] <- data_model2$x[1] + 1 scode_model2 <- make_stancode(y ~ 0 + x, data = data_model2) sdata_model2 <- make_standata(y ~ 0 + x, data = data_model2) write_brmsfit(fake_fit, file = cache_tmp) cache_res <- read_brmsfit(file = cache_tmp) expect_equal(cache_res, fake_fit_file) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model1, algorithm = NULL, silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = NULL, algorithm = "sampling", silent = TRUE)) expect_false(brmsfit_needs_refit( cache_res, sdata = NULL, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model2, scode = scode_model1, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model1, scode = scode_model2, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model2, scode = scode_model2, algorithm = "sampling", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = sdata_model1, 
scode = scode_model1, algorithm = "optimize", silent = TRUE)) expect_true(brmsfit_needs_refit( cache_res, sdata = make_standata(y ~ x, data = data_model1, sample_prior = "only"), scode = scode_model1, algorithm = NULL, silent = TRUE)) }) test_that("insert_refcat() works correctly", { source(testthat::test_path(file.path("helpers", "insert_refcat_ch.R"))) source(testthat::test_path(file.path("helpers", "simopts_catlike.R"))) for (ndraws in ndraws_vec) { for (nobsv in nobsv_vec) { for (ncat in ncat_vec) { cats <- paste0("cat", 1:ncat) fam_list <- list( fam_refNULL = categorical(), fam_ref1 = categorical(refcat = cats[1]), fam_reflast = categorical(refcat = cats[ncat]) ) if (ncat > 2) { fam_list <- c(fam_list, list(fam_ref2 = categorical(refcat = cats[2]))) } eta_test_list <- list(array(rnorm(ndraws * nobsv * (ncat - 1)), dim = c(ndraws, nobsv, ncat - 1))) if (nobsv == 1) { eta_test_list <- c( eta_test_list, list(matrix(rnorm(ndraws * (ncat - 1)), nrow = ndraws)) ) } for (eta_test in eta_test_list) { for (fam in fam_list) { # Emulate content of `fam` after fit: if (is.null(fam$refcat)) { fam$refcat <- cats[1] } fam$cats <- cats # Perform the check: eta_ref <- insert_refcat(eta_test, fam) eta_ref_ch <- insert_refcat_ch(eta_test, fam) expect_equivalent(eta_ref, eta_ref_ch) if (length(dim(eta_test)) == 3) { expect_equal(dim(eta_ref), c(ndraws, nobsv, ncat)) } else if (length(dim(eta_test)) == 2) { expect_equal(dim(eta_ref), c(ndraws, ncat)) } } } } } } }) brms/tests/testthat.R0000644000175000017500000000007013234633711014474 0ustar nileshnileshlibrary(testthat) library(brms) test_check("brms") brms/R/0000755000175000017500000000000014146747057011567 5ustar nileshnileshbrms/R/hypothesis.R0000644000175000017500000005540614111751666014114 0ustar nileshnilesh#' Non-Linear Hypothesis Testing #' #' Perform non-linear hypothesis testing for all model parameters. #' #' @param x An \code{R} object. If it is no \code{brmsfit} object, #' it must be coercible to a \code{data.frame}. #' In the latter case, the variables used in the \code{hypothesis} argument #' need to correspond to column names of \code{x}, while the rows #' are treated as representing posterior draws of the variables. #' @param hypothesis A character vector specifying one or more #' non-linear hypothesis concerning parameters of the model. #' @param class A string specifying the class of parameters being tested. #' Default is "b" for population-level effects. #' Other typical options are "sd" or "cor". #' If \code{class = NULL}, all parameters can be tested #' against each other, but have to be specified with their full name #' (see also \code{\link[brms:draws-index-brms]{variables}}) #' @param group Name of a grouping factor to evaluate only #' group-level effects parameters related to this grouping factor. #' @param alpha The alpha-level of the tests (default is 0.05; #' see 'Details' for more information). #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' @param scope Indicates where to look for the variables specified in #' \code{hypothesis}. If \code{"standard"}, use the full parameter names #' (subject to the restriction given by \code{class} and \code{group}). 
#' If \code{"coef"} or \code{"ranef"}, compute the hypothesis for all levels #' of the grouping factor given in \code{"group"}, based on the #' output of \code{\link{coef.brmsfit}} and \code{\link{ranef.brmsfit}}, #' respectively. #' @param seed A single numeric value passed to \code{\link{set.seed}} #' to make results reproducible. #' @param ... Currently ignored. #' #' @details Among others, \code{hypothesis} computes an evidence ratio #' (\code{Evid.Ratio}) for each hypothesis. For a one-sided hypothesis, this #' is just the posterior probability (\code{Post.Prob}) under the hypothesis #' against its alternative. That is, when the hypothesis is of the form #' \code{a > b}, the evidence ratio is the ratio of the posterior probability #' of \code{a > b} and the posterior probability of \code{a < b}. In this #' example, values greater than one indicate that the evidence in favor of #' \code{a > b} is larger than evidence in favor of \code{a < b}. For an #' two-sided (point) hypothesis, the evidence ratio is a Bayes factor between #' the hypothesis and its alternative computed via the Savage-Dickey density #' ratio method. That is the posterior density at the point of interest #' divided by the prior density at that point. Values greater than one #' indicate that evidence in favor of the point hypothesis has increased after #' seeing the data. In order to calculate this Bayes factor, all parameters #' related to the hypothesis must have proper priors and argument #' \code{sample_prior} of function \code{brm} must be set to \code{"yes"}. #' Otherwise \code{Evid.Ratio} (and \code{Post.Prob}) will be \code{NA}. #' Please note that, for technical reasons, we cannot sample from priors of #' certain parameters classes. Most notably, these include overall intercept #' parameters (prior class \code{"Intercept"}) as well as group-level #' coefficients. When interpreting Bayes factors, make sure that your priors #' are reasonable and carefully chosen, as the result will depend heavily on #' the priors. In particular, avoid using default priors. #' #' The \code{Evid.Ratio} may sometimes be \code{0} or \code{Inf} implying very #' small or large evidence, respectively, in favor of the tested hypothesis. #' For one-sided hypotheses pairs, this basically means that all posterior #' draws are on the same side of the value dividing the two hypotheses. In #' that sense, instead of \code{0} or \code{Inf,} you may rather read it as #' \code{Evid.Ratio} smaller \code{1 / S} or greater \code{S}, respectively, #' where \code{S} denotes the number of posterior draws used in the #' computations. #' #' The argument \code{alpha} specifies the size of the credible interval #' (i.e., Bayesian confidence interval). For instance, if we tested a #' two-sided hypothesis and set \code{alpha = 0.05} (5\%) an, the credible #' interval will contain \code{1 - alpha = 0.95} (95\%) of the posterior #' values. Hence, \code{alpha * 100}\% of the posterior values will #' lie outside of the credible interval. Although this allows testing of #' hypotheses in a similar manner as in the frequentist null-hypothesis #' testing framework, we strongly argue against using arbitrary cutoffs (e.g., #' \code{p < .05}) to determine the 'existence' of an effect. #' #' @return A \code{\link{brmshypothesis}} object. 
#' #' @seealso \code{\link{brmshypothesis}} #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @examples #' \dontrun{ #' ## define priors #' prior <- c(set_prior("normal(0,2)", class = "b"), #' set_prior("student_t(10,0,1)", class = "sigma"), #' set_prior("student_t(10,0,1)", class = "sd")) #' #' ## fit a linear mixed effects models #' fit <- brm(time ~ age + sex + disease + (1 + age|patient), #' data = kidney, family = lognormal(), #' prior = prior, sample_prior = "yes", #' control = list(adapt_delta = 0.95)) #' #' ## perform two-sided hypothesis testing #' (hyp1 <- hypothesis(fit, "sexfemale = age + diseasePKD")) #' plot(hyp1) #' hypothesis(fit, "exp(age) - 3 = 0", alpha = 0.01) #' #' ## perform one-sided hypothesis testing #' hypothesis(fit, "diseasePKD + diseaseGN - 3 < 0") #' #' hypothesis(fit, "age < Intercept", #' class = "sd", group = "patient") #' #' ## test the amount of random intercept variance on all variance #' h <- paste("sd_patient__Intercept^2 / (sd_patient__Intercept^2 +", #' "sd_patient__age^2 + sigma^2) = 0") #' (hyp2 <- hypothesis(fit, h, class = NULL)) #' plot(hyp2) #' #' ## test more than one hypothesis at once #' h <- c("diseaseGN = diseaseAN", "2 * diseaseGN - diseasePKD = 0") #' (hyp3 <- hypothesis(fit, h)) #' plot(hyp3, ignore_prior = TRUE) #' #' ## compute hypotheses for all levels of a grouping factor #' hypothesis(fit, "age = 0", scope = "coef", group = "patient") #' #' ## use the default method #' dat <- as.data.frame(fit) #' str(dat) #' hypothesis(dat, "b_age > 0") #' } #' #' @export hypothesis.brmsfit <- function(x, hypothesis, class = "b", group = "", scope = c("standard", "ranef", "coef"), alpha = 0.05, robust = FALSE, seed = NULL, ...) { # use a seed as prior_draws.brmsfit randomly permutes draws if (!is.null(seed)) { set.seed(seed) } contains_draws(x) x <- restructure(x) group <- as_one_character(group) scope <- match.arg(scope) if (scope == "standard") { if (!length(class)) { class <- "" } class <- as_one_character(class) if (nzchar(group)) { class <- paste0(class, "_", group, "__") } else if (nzchar(class)) { class <- paste0(class, "_") } out <- .hypothesis( x, hypothesis, class = class, alpha = alpha, robust = robust, ... ) } else { co <- do_call(scope, list(x, summary = FALSE)) if (!group %in% names(co)) { stop2("'group' should be one of ", collapse_comma(names(co))) } out <- hypothesis_coef( co[[group]], hypothesis, alpha = alpha, robust = robust, ... ) } out } #' @rdname hypothesis.brmsfit #' @export hypothesis <- function(x, ...) { UseMethod("hypothesis") } #' @rdname hypothesis.brmsfit #' @export hypothesis.default <- function(x, hypothesis, alpha = 0.05, robust = FALSE, ...) { x <- as.data.frame(x) .hypothesis( x, hypothesis, class = "", alpha = alpha, robust = robust, ... ) } #' Descriptions of \code{brmshypothesis} Objects #' #' A \code{brmshypothesis} object contains posterior draws #' as well as summary statistics of non-linear hypotheses as #' returned by \code{\link{hypothesis}}. #' #' @name brmshypothesis #' #' @param ignore_prior A flag indicating if prior distributions #' should also be plotted. Only used if priors were specified on #' the relevant parameters. #' @param digits Minimal number of significant digits, #' see \code{\link[base:print.default]{print.default}}. #' @param chars Maximum number of characters of each hypothesis #' to print or plot. If \code{NULL}, print the full hypotheses. #' Defaults to \code{20}. 
#' @param colors Two values specifying the colors of the posterior #' and prior density respectively. If \code{NULL} (the default) #' colors are taken from the current color scheme of #' the \pkg{bayesplot} package. #' @param ... Currently ignored. #' @inheritParams plot.brmsfit #' #' @details #' The two most important elements of a \code{brmshypothesis} object are #' \code{hypothesis}, which is a data.frame containing the summary estimates #' of the hypotheses, and \code{samples}, which is a data.frame containing #' the corresponding posterior draws. #' #' @seealso \code{\link{hypothesis}} NULL # internal function to evaluate hypotheses # @param x the primary object passed to the hypothesis method; # needs to be a brmsfit object or coercible to a data.frame # @param hypothesis vector of character strings containing the hypotheses # @param class prefix of the parameters in the hypotheses # @param alpha the 'alpha-level' as understood by frequentist statistics # @return a 'brmshypothesis' object .hypothesis <- function(x, hypothesis, class, alpha, robust, combine = TRUE, ...) { if (!is.character(hypothesis) || !length(hypothesis)) { stop2("Argument 'hypothesis' must be a character vector.") } if (length(alpha) != 1L || alpha < 0 || alpha > 1) { stop2("Argument 'alpha' must be a single value in [0,1].") } class <- as_one_character(class) robust <- as_one_logical(robust) out <- vector("list", length(hypothesis)) for (i in seq_along(out)) { out[[i]] <- eval_hypothesis( hypothesis[i], x = x, class = class, alpha = alpha, robust = robust, name = names(hypothesis)[i] ) } if (combine) { out <- combine_hlist(out, class = class, alpha = alpha) } out } # evaluate hypotheses for an arrary of ranefs or coefs # seperaly for each grouping-factor level hypothesis_coef <- function(x, hypothesis, alpha, ...) { stopifnot(is.array(x), length(dim(x)) == 3L) levels <- dimnames(x)[[2]] coefs <- dimnames(x)[[3]] x <- lapply(seq_along(levels), function(l) structure(as.data.frame(x[, l, ]), names = coefs) ) out <- vector("list", length(levels)) for (l in seq_along(levels)) { out[[l]] <- .hypothesis( x[[l]], hypothesis, class = "", alpha = alpha, combine = FALSE, ... 
) for (i in seq_along(out[[l]])) { out[[l]][[i]]$summary$Group <- levels[l] } } out <- unlist(out, recursive = FALSE) out <- as.list(matrix(out, ncol = length(hypothesis), byrow = TRUE)) out <- combine_hlist(out, class = "", alpha = alpha) out$hypothesis$Group <- factor(out$hypothesis$Group, levels) out$hypothesis <- move2start(out$hypothesis, "Group") out } # combine list of outputs of eval_hypothesis # @param hlist list of evaluate hypothesis # @return a 'brmshypothesis' object combine_hlist <- function(hlist, class, alpha) { stopifnot(is.list(hlist)) hs <- do_call(rbind, lapply(hlist, function(h) h$summary)) rownames(hs) <- NULL samples <- lapply(hlist, function(h) h$samples) samples <- as.data.frame(do_call(cbind, samples)) prior_samples <- lapply(hlist, function(h) h$prior_samples) prior_samples <- as.data.frame(do_call(cbind, prior_samples)) names(samples) <- names(prior_samples) <- paste0("H", seq_along(hlist)) class <- sub("_+$", "", class) # TODO: rename 'samples' to 'draws' in brms 3.0 out <- nlist(hypothesis = hs, samples, prior_samples, class, alpha) structure(out, class = "brmshypothesis") } # evaluate a single hypothesis based on the posterior draws eval_hypothesis <- function(h, x, class, alpha, robust, name = NULL) { stopifnot(length(h) == 1L && is.character(h)) pars <- variables(x)[grepl(paste0("^", class), variables(x))] # parse hypothesis string h <- gsub("[ \t\r\n]", "", h) sign <- get_matches("=|<|>", h) lr <- get_matches("[^=<>]+", h) if (length(sign) != 1L || length(lr) != 2L) { stop2("Every hypothesis must be of the form 'left (= OR < OR >) right'.") } h <- paste0("(", lr[1], ")") h <- paste0(h, ifelse(lr[2] != "0", paste0("-(", lr[2], ")"), "")) varsH <- find_vars(h) parsH <- paste0(class, varsH) miss_pars <- setdiff(parsH, pars) if (length(miss_pars)) { miss_pars <- collapse_comma(miss_pars) stop2("Some parameters cannot be found in the model: \n", miss_pars) } # rename hypothesis for correct evaluation h_renamed <- rename(h, c(":", "[", "]", ","), c("___", ".", ".", "..")) # get posterior and prior draws pattern <- c(paste0("^", class), ":", "\\[", "\\]", ",") repl <- c("", "___", ".", ".", "..") samples <- as.data.frame(x, variable = parsH) names(samples) <- rename(names(samples), pattern, repl, fixed = FALSE) samples <- as.matrix(eval2(h_renamed, samples)) prior_samples <- prior_draws(x, variable = parsH) if (!is.null(prior_samples) && ncol(prior_samples) == length(varsH)) { names(prior_samples) <- rename( names(prior_samples), pattern, repl, fixed = FALSE ) prior_samples <- as.matrix(eval2(h_renamed, prior_samples)) } else { prior_samples <- NULL } # summarize hypothesis wsign <- switch(sign, "=" = "equal", "<" = "less", ">" = "greater") probs <- switch(sign, "=" = c(alpha / 2, 1 - alpha / 2), "<" = c(alpha, 1 - alpha), ">" = c(alpha, 1 - alpha) ) if (robust) { measures <- c("median", "mad") } else { measures <- c("mean", "sd") } measures <- c(measures, "quantile", "evidence_ratio") sm <- lapply( measures, get_estimate, draws = samples, probs = probs, wsign = wsign, prior_samples = prior_samples ) sm <- as.data.frame(matrix(unlist(sm), nrow = 1)) names(sm) <- c("Estimate", "Est.Error", "CI.Lower", "CI.Upper", "Evid.Ratio") sm$Post.Prob <- sm$Evid.Ratio / (1 + sm$Evid.Ratio) if (is.infinite(sm$Evid.Ratio)) { sm$Post.Prob <- 1 } if (sign == "=") { sm$Star <- str_if(!(sm$CI.Lower <= 0 && 0 <= sm$CI.Upper), "*") } else { sm$Star <- str_if(sm$Post.Prob > 1 - alpha, "*") } if (!length(name) || !nzchar(name)) { name <- paste(h, sign, "0") } sm$Hypothesis <- 
as_one_character(name) sm <- move2start(sm, "Hypothesis") if (is.null(prior_samples)) { prior_samples <- as.matrix(rep(NA, nrow(samples))) } nlist(summary = sm, samples, prior_samples) } # find all valid variable names in a string # @param x a character string # @param dot are dots allowed in variable names? # @param brackets allow brackets at the end of variable names? # @return all valid variable names within the string # @note does not use the R parser itself to allow for double points, # square brackets, and commas at the end of names find_vars <- function(x, dot = TRUE, brackets = TRUE) { x <- gsub("[[:space:]]", "", as_one_character(x)) dot <- as_one_logical(dot) brackets <- as_one_logical(brackets) regex_all <- paste0( "([^([:digit:]|[:punct:])]", if (dot) "|\\.", ")", "[[:alnum:]_\\:", if (dot) "\\.", "]*", if (brackets) "(\\[[^],]+(,[^],]+)*\\])?" ) pos_all <- gregexpr(regex_all, x)[[1]] regex_fun <- paste0( "([^([:digit:]|[:punct:])]", if (dot) "|\\.", ")", "[[:alnum:]_", if (dot) "\\.", "]*\\(" ) pos_fun <- gregexpr(regex_fun, x)[[1]] pos_decnum <- gregexpr("\\.[[:digit:]]+", x)[[1]] keep <- !pos_all %in% c(pos_fun, pos_decnum) pos_var <- pos_all[keep] attr(pos_var, "match.length") <- attributes(pos_all)$match.length[keep] if (length(pos_var)) { out <- unique(unlist(regmatches(x, list(pos_var)))) } else { out <- character(0) } out } #' Compute Density Ratios #' #' Compute the ratio of two densities at given points based on draws of the #' corresponding distributions. #' #' @param x Vector of draws from the first distribution, usually the posterior #' distribution of the quantity of interest. #' @param y Optional vector of draws from the second distribution, usually the #' prior distribution of the quantity of interest. If \code{NULL} (the #' default), only the density of \code{x} will be evaluated. #' @param point Numeric values at which to evaluate and compare the densities. #' Defaults to \code{0}. #' @param n Single numeric value. Influences the accuracy of the density #' estimation. See \code{\link[stats:density]{density}} for details. #' @param ... Further arguments passed to \code{\link[stats:density]{density}}. #' #' @return A vector of length equal to \code{length(point)}. If \code{y} is #' provided, the density ratio of \code{x} against \code{y} is returned. Else, #' only the density of \code{x} is returned. #' #' @details In order to achieve sufficient accuracy in the density estimation, #' more draws than usual are required. That is you may need an effective #' sample size of 10,000 or more to reliably estimate the densities. #' #' @examples #' x <- rnorm(10000) #' y <- rnorm(10000, mean = 1) #' density_ratio(x, y, point = c(0, 1)) #' #' @export density_ratio <- function(x, y = NULL, point = 0, n = 4096, ...) { x <- as.numeric(x) point <- as.numeric(point) dots <- list(...) 
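  # The steps below keep only those arguments in '...' that density() actually
  # accepts, estimate the kernel density of 'x' (and of 'y' if supplied), and
  # interpolate it at each requested point via spline(); the ratio of the two
  # interpolated densities is the Savage-Dickey style density ratio.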
dots <- dots[names(dots) %in% names(formals("density.default"))] dots$n <- n eval_density <- function(x, point) { # evaluate density of x at point from <- min(x) to <- max(x) if (from > point) { from <- point - sd(x) / 4 } else if (to < point) { to <- point + sd(x) / 4 } dens <- do_call(density, c(nlist(x, from, to), dots)) return(spline(dens$x, dens$y, xout = point)$y) } out <- ulapply(point, eval_density, x = x) if (!is.null(y)) { y <- as.numeric(y) out <- out / ulapply(point, eval_density, x = y) } out } # compute the evidence ratio between two disjunct hypotheses # @param x posterior draws # @param cut the cut point between the two hypotheses # @param wsign direction of the hypothesis # @param prior_samples optional prior draws for two-sided hypothesis # @param ... optional arguments passed to density_ratio # @return the evidence ratio of the two hypothesis evidence_ratio <- function(x, cut = 0, wsign = c("equal", "less", "greater"), prior_samples = NULL, ...) { wsign <- match.arg(wsign) if (wsign == "equal") { if (is.null(prior_samples)) { out <- NA } else { out <- density_ratio(x, prior_samples, point = cut, ...) } } else if (wsign == "less") { out <- length(which(x < cut)) out <- out / (length(x) - out) } else if (wsign == "greater") { out <- length(which(x > cut)) out <- out / (length(x) - out) } out } # round all numeric elements of a list-like object round_numeric <- function(x, digits = 2) { stopifnot(is.list(x)) for (i in seq_along(x)) { if (is.numeric(x[[i]])) { x[[i]] <- round(x[[i]], digits = digits) } } x } #' @rdname brmshypothesis #' @export print.brmshypothesis <- function(x, digits = 2, chars = 20, ...) { # make sure hypothesis names are not too long x$hypothesis$Hypothesis <- limit_chars( x$hypothesis$Hypothesis, chars = chars ) cat(paste0("Hypothesis Tests for class ", x$class, ":\n")) x$hypothesis <- round_numeric(x$hypothesis, digits = digits) print(x$hypothesis, quote = FALSE) pone <- (1 - x$alpha * 2) * 100 ptwo <- (1 - x$alpha) * 100 cat(glue( "---\n'CI': {pone}%-CI for one-sided and {ptwo}%-CI for two-sided hypotheses.\n", "'*': For one-sided hypotheses, the posterior probability exceeds {ptwo}%;\n", "for two-sided hypotheses, the value tested against lies outside the {ptwo}%-CI.\n", "Posterior probabilities of point hypotheses assume equal prior probabilities.\n" )) invisible(x) } #' @rdname brmshypothesis #' @method plot brmshypothesis #' @export plot.brmshypothesis <- function(x, N = 5, ignore_prior = FALSE, chars = 40, colors = NULL, theme = NULL, ask = TRUE, plot = TRUE, ...) { dots <- list(...) 
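  # The code below plots the hypotheses in pages of up to N densities each:
  # posterior draws (and prior draws unless ignore_prior is TRUE) are stacked
  # into long format and faceted by hypothesis, with fill colors taken from
  # the current bayesplot color scheme whenever 'colors' is NULL.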
if (!is.data.frame(x$samples)) { stop2("No posterior draws found.") } plot <- use_alias(plot, dots$do_plot) if (is.null(colors)) { colors <- bayesplot::color_scheme_get()[c(4, 2)] colors <- unname(unlist(colors)) } if (length(colors) != 2L) { stop2("Argument 'colors' must be of length 2.") } .plot_fun <- function(samples) { gg <- ggplot(samples, aes_string(x = "values")) + facet_wrap("ind", ncol = 1, scales = "free") + xlab("") + ylab("") + theme + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) if (ignore_prior) { gg <- gg + geom_density(alpha = 0.7, fill = colors[1], na.rm = TRUE) } else { gg <- gg + geom_density(aes_string(fill = "Type"), alpha = 0.7, na.rm = TRUE) + scale_fill_manual(values = colors) } return(gg) } samples <- cbind(x$samples, Type = "Posterior") if (!ignore_prior) { prior_samples <- cbind(x$prior_samples, Type = "Prior") samples <- rbind(samples, prior_samples) } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } hyps <- limit_chars(x$hypothesis$Hypothesis, chars = chars) names(samples)[seq_along(hyps)] <- hyps nplots <- ceiling(length(hyps) / N) plots <- vector(mode = "list", length = nplots) for (i in seq_len(nplots)) { rel_hyps <- hyps[((i - 1) * N + 1):min(i * N, length(hyps))] sub_samples <- cbind( utils::stack(samples[, rel_hyps, drop = FALSE]), samples[, "Type", drop = FALSE] ) # make sure that parameters appear in the original order sub_samples$ind <- with(sub_samples, factor(ind, levels = unique(ind))) plots[[i]] <- .plot_fun(sub_samples) if (plot) { plot(plots[[i]]) if (i == 1) devAskNewPage(ask = ask) } } invisible(plots) } brms/R/loo_moment_match.R0000644000175000017500000001622314111751666015233 0ustar nileshnilesh#' Moment matching for efficient approximate leave-one-out cross-validation #' #' Moment matching for efficient approximate leave-one-out cross-validation #' (LOO-CV). See \code{\link[loo:loo_moment_match]{loo_moment_match}} #' for more details. #' #' @aliases loo_moment_match #' #' @inheritParams predict.brmsfit #' @param x An object of class \code{brmsfit}. #' @param loo An object of class \code{loo} originally created from \code{x}. #' @param k_threshold The threshold at which Pareto \eqn{k} #' estimates are treated as problematic. Defaults to \code{0.7}. #' See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} #' for more details. #' @param check Logical; If \code{TRUE} (the default), some checks #' check are performed if the \code{loo} object was generated #' from the \code{brmsfit} object passed to argument \code{fit}. #' @param ... Further arguments passed to the underlying methods. #' Additional arguments initially passed to \code{\link{loo}}, #' for example, \code{newdata} or \code{resp} need to be passed #' again to \code{loo_moment_match} in order for the latter #' to work correctly. #' @return An updated object of class \code{loo}. #' #' @details The moment matching algorithm requires draws of all variables #' defined in Stan's \code{parameters} block to be saved. Otherwise #' \code{loo_moment_match} cannot be computed. Thus, please set #' \code{save_pars = save_pars(all = TRUE)} in the call to \code{\link{brm}}, #' if you are planning to apply \code{loo_moment_match} to your models. #' #' @references #' Paananen, T., Piironen, J., Buerkner, P.-C., Vehtari, A. (2021). #' Implicitly Adaptive Importance Sampling. Statistics and Computing. 
#' #' @examples #' \dontrun{ #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' save_pars = save_pars(all = TRUE)) #' #' # throws warning about some pareto k estimates being too high #' (loo1 <- loo(fit1)) #' (mmloo1 <- loo_moment_match(fit1, loo = loo1)) #' } #' #' @importFrom loo loo_moment_match #' @export loo_moment_match #' @export loo_moment_match.brmsfit <- function(x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, ...) { stopifnot(is.loo(loo), is.brmsfit(x)) if (is.null(newdata)) { newdata <- model.frame(x) } else { newdata <- as.data.frame(newdata) } check <- as_one_logical(check) if (check) { yhash_loo <- attr(loo, "yhash") yhash_fit <- hash_response(x, newdata = newdata) if (!is_equal(yhash_loo, yhash_fit)) { stop2( "Response values used in 'loo' and 'x' do not match. ", "If this is a false positive, please set 'check' to FALSE." ) } } # otherwise loo_moment_match might not work in a new R session x <- update_misc_env(x) out <- try(loo::loo_moment_match.default( x, loo = loo, post_draws = as.matrix, log_lik_i = .log_lik_i, unconstrain_pars = .unconstrain_pars, log_prob_upars = .log_prob_upars, log_lik_i_upars = .log_lik_i_upars, k_threshold = k_threshold, newdata = newdata, resp = resp, ... )) if (is(out, "try-error")) { stop2( "Moment matching failed. Perhaps you did not set ", "'save_pars = save_pars(all = TRUE)' when fitting your model?" ) } out } # compute a vector of log-likelihood values for the ith observation .log_lik_i <- function(x, i, newdata, ...) { as.vector(log_lik(x, newdata = newdata[i, , drop = FALSE], ...)) } # transform parameters to the unconstrained space .unconstrain_pars <- function(x, pars, ...) { unconstrain_pars_stanfit(x$fit, pars = pars, ...) } # compute log_prob for each posterior draws on the unconstrained space .log_prob_upars <- function(x, upars, ...) { x <- update_misc_env(x, only_windows = TRUE) log_prob_upars_stanfit(x$fit, upars = upars, ...) } # transform parameters to the constraint space .update_pars <- function(x, upars, ...) { # list with one element per posterior draw pars <- apply(upars, 1, .constrain_pars, x = x) # select required parameters only pars <- lapply(pars, "[", x$fit@sim$pars_oi_old) # transform draws ndraws <- length(pars) pars <- unlist(pars) npars <- length(pars) / ndraws dim(pars) <- c(npars, ndraws) # add dummy 'lp__' draws pars <- rbind(pars, rep(0, ndraws)) # bring draws into the right structure new_draws <- named_list(x$fit@sim$fnames_oi_old, list(numeric(ndraws))) if (length(new_draws) != nrow(pars)) { stop2("Updating parameters in `loo_moment_match.brmsfit' failed. 
", "Please report a bug at https://github.com/paul-buerkner/brms.") } for (i in seq_len(npars)) { new_draws[[i]] <- pars[i, ] } # create new sim object to overwrite x$fit@sim x$fit@sim <- list( samples = list(new_draws), iter = ndraws, thin = 1, warmup = 0, chains = 1, n_save = ndraws, warmup2 = 0, permutation = list(seq_len(ndraws)), pars_oi = x$fit@sim$pars_oi_old, dims_oi = x$fit@sim$dims_oi_old, fnames_oi = x$fit@sim$fnames_oi_old, n_flatnames = length(x$fit@sim$fnames_oi_old) ) x$fit@stan_args <- list( list(chain_id = 1, iter = ndraws, thin = 1, warmup = 0) ) rename_pars(x) } # wrapper around rstan::constrain_pars # ensures that the right posterior draws are excluded .constrain_pars <- function(upars, x) { out <- rstan::constrain_pars(upars, object = x$fit) out[x$exclude] <- NULL out } # compute log_lik values based on the unconstrained parameters .log_lik_i_upars <- function(x, upars, i, ndraws = NULL, draw_ids = NULL, ...) { # do not pass draw_ids or ndraws further to avoid subsetting twice x <- update_misc_env(x, only_windows = TRUE) x <- .update_pars(x, upars = upars, ...) .log_lik_i(x, i = i, ...) } # -------- will be imported from rstan at some point ------- # transform parameters to the unconstraint space unconstrain_pars_stanfit <- function(x, pars, ...) { skeleton <- .create_skeleton(x@sim$pars_oi, x@par_dims[x@sim$pars_oi]) upars <- apply(pars, 1, FUN = function(theta) { rstan::unconstrain_pars(x, pars = .rstan_relist(theta, skeleton)) }) # for one parameter models if (is.null(dim(upars))) { dim(upars) <- c(1, length(upars)) } t(upars) } # compute log_prob for each posterior draws on the unconstrained space log_prob_upars_stanfit <- function(x, upars, ...) { apply(upars, 1, rstan::log_prob, object = x, adjust_transform = TRUE, gradient = FALSE) } # create a named list of draws for use with rstan methods .rstan_relist <- function (x, skeleton) { out <- utils::relist(x, skeleton) for (i in seq_along(skeleton)) { dim(out[[i]]) <- dim(skeleton[[i]]) } out } # rstan helper function to get dims of parameters right .create_skeleton <- function (pars, dims) { out <- lapply(seq_along(pars), function(i) { len_dims <- length(dims[[i]]) if (len_dims < 1) return(0) return(array(0, dim = dims[[i]])) }) names(out) <- pars out } brms/R/distributions.R0000644000175000017500000023063514111751666014616 0ustar nileshnilesh#' The Student-t Distribution #' #' Density, distribution function, quantile function and random generation #' for the Student-t distribution with location \code{mu}, scale \code{sigma}, #' and degrees of freedom \code{df}. #' #' @name StudentT #' #' @param x,q Vector of quantiles. #' @param p Vector of probabilities. #' @param n Number of draws to sample from the distribution. #' @param mu Vector of location values. #' @param sigma Vector of scale values. #' @param df Vector of degrees of freedom. #' @param log,log.p Logical; If \code{TRUE}, values are returned on the log scale. #' @param lower.tail Logical; If \code{TRUE} (default), return P(X <= x). #' Else, return P(X > x) . #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
#' #' @seealso \code{\link[stats:TDist]{TDist}} #' #' @export dstudent_t <- function(x, df, mu = 0, sigma = 1, log = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (log) { dt((x - mu) / sigma, df = df, log = TRUE) - log(sigma) } else { dt((x - mu) / sigma, df = df) / sigma } } #' @rdname StudentT #' @export pstudent_t <- function(q, df, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } pt((q - mu) / sigma, df = df, lower.tail = lower.tail, log.p = log.p) } #' @rdname StudentT #' @export qstudent_t <- function(p, df, mu = 0, sigma = 1) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } mu + sigma * qt(p, df = df) } #' @rdname StudentT #' @export rstudent_t <- function(n, df, mu = 0, sigma = 1) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } mu + sigma * rt(n, df = df) } #' The Multivariate Normal Distribution #' #' Density function and random generation for the multivariate normal #' distribution with mean vector \code{mu} and covariance matrix \code{Sigma}. #' #' @name MultiNormal #' #' @inheritParams StudentT #' @param x Vector or matrix of quantiles. If \code{x} is a matrix, #' each row is taken to be a quantile. #' @param mu Mean vector with length equal to the number of dimensions. #' @param Sigma Covariance matrix. #' @param check Logical; Indicates whether several input checks #' should be performed. Defaults to \code{FALSE} to improve #' efficiency. #' #' @details See the Stan user's manual \url{https://mc-stan.org/documentation/} #' for details on the parameterization #' #' @export dmulti_normal <- function(x, mu, Sigma, log = FALSE, check = FALSE) { if (is.vector(x) || length(dim(x)) == 1L) { x <- matrix(x, ncol = length(x)) } p <- ncol(x) if (check) { if (length(mu) != p) { stop2("Dimension of mu is incorrect.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } chol_Sigma <- chol(Sigma) rooti <- backsolve(chol_Sigma, t(x) - mu, transpose = TRUE) quads <- colSums(rooti^2) out <- -(p / 2) * log(2 * pi) - sum(log(diag(chol_Sigma))) - .5 * quads if (!log) { out <- exp(out) } out } #' @rdname MultiNormal #' @export rmulti_normal <- function(n, mu, Sigma, check = FALSE) { p <- length(mu) if (check) { if (!(is_wholenumber(n) && n > 0)) { stop2("n must be a positive integer.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } draws <- matrix(rnorm(n * p), nrow = n, ncol = p) mu + draws %*% chol(Sigma) } #' The Multivariate Student-t Distribution #' #' Density function and random generation for the multivariate Student-t #' distribution with location vector \code{mu}, covariance matrix \code{Sigma}, #' and degrees of freedom \code{df}. #' #' @name MultiStudentT #' #' @inheritParams StudentT #' @param x Vector or matrix of quantiles. If \code{x} is a matrix, #' each row is taken to be a quantile. #' @param mu Location vector with length equal to the number of dimensions. #' @param Sigma Covariance matrix. #' @param check Logical; Indicates whether several input checks #' should be performed. Defaults to \code{FALSE} to improve #' efficiency. 
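# A quick check of the multivariate normal density defined above: for a
# diagonal covariance matrix the joint log density equals the sum of the
# univariate normal log densities.
mu <- c(1, -2)
Sigma <- diag(c(2, 0.5))
x <- c(0.3, -1.5)
stopifnot(all.equal(
  dmulti_normal(x, mu = mu, Sigma = Sigma, log = TRUE),
  sum(dnorm(x, mean = mu, sd = sqrt(diag(Sigma)), log = TRUE))
))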
#' #' @details See the Stan user's manual \url{https://mc-stan.org/documentation/} #' for details on the parameterization #' #' @export dmulti_student_t <- function(x, df, mu, Sigma, log = FALSE, check = FALSE) { if (is.vector(x) || length(dim(x)) == 1L) { x <- matrix(x, ncol = length(x)) } p <- ncol(x) if (check) { if (isTRUE(any(df <= 0))) { stop2("df must be greater than 0.") } if (length(mu) != p) { stop2("Dimension of mu is incorrect.") } if (!all(dim(Sigma) == c(p, p))) { stop2("Dimension of Sigma is incorrect.") } if (!is_symmetric(Sigma)) { stop2("Sigma must be a symmetric matrix.") } } chol_Sigma <- chol(Sigma) rooti <- backsolve(chol_Sigma, t(x) - mu, transpose = TRUE) quads <- colSums(rooti^2) out <- lgamma((p + df)/2) - (lgamma(df / 2) + sum(log(diag(chol_Sigma))) + p / 2 * log(pi * df)) - 0.5 * (df + p) * log1p(quads / df) if (!log) { out <- exp(out) } out } #' @rdname MultiStudentT #' @export rmulti_student_t <- function(n, df, mu, Sigma, check = FALSE) { p <- length(mu) if (isTRUE(any(df <= 0))) { stop2("df must be greater than 0.") } draws <- rmulti_normal(n, mu = rep(0, p), Sigma = Sigma, check = check) draws <- draws / sqrt(rchisq(n, df = df) / df) sweep(draws, 2, mu, "+") } #' The Skew-Normal Distribution #' #' Density, distribution function, and random generation for the #' skew-normal distribution with mean \code{mu}, #' standard deviation \code{sigma}, and skewness \code{alpha}. #' #' @name SkewNormal #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of mean values. #' @param sigma Vector of standard deviation values. #' @param alpha Vector of skewness values. #' @param xi Optional vector of location values. #' If \code{NULL} (the default), will be computed internally. #' @param omega Optional vector of scale values. #' If \code{NULL} (the default), will be computed internally. #' @param tol Tolerance of the approximation used in the #' computation of quantiles. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
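# A small sketch for the multivariate Student-t helpers defined above: for
# very large degrees of freedom the density is close to the multivariate
# normal density at the same point.
mu <- c(0, 0)
Sigma <- matrix(c(1, 0.3, 0.3, 1), 2, 2)
x <- c(0.5, -0.5)
dmulti_student_t(x, df = 1e6, mu = mu, Sigma = Sigma, log = TRUE)
dmulti_normal(x, mu = mu, Sigma = Sigma, log = TRUE)  # nearly the same value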
#' #' @export dskew_normal <- function(x, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, log = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be greater than 0.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, x = x) out <- with(args, { # do it like sn::dsn z <- (x - xi) / omega if (length(alpha) == 1L) { alpha <- rep(alpha, length(z)) } logN <- -log(sqrt(2 * pi)) - log(omega) - z^2 / 2 logS <- ifelse( abs(alpha) < Inf, pnorm(alpha * z, log.p = TRUE), log(as.numeric(sign(alpha) * z > 0)) ) out <- logN + logS - pnorm(0, log.p = TRUE) ifelse(abs(z) == Inf, -Inf, out) }) if (!log) { out <- exp(out) } out } #' @rdname SkewNormal #' @export pskew_normal <- function(q, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE) { require_package("mnormt") if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, q = q) out <- with(args, { # do it like sn::psn z <- (q - xi) / omega nz <- length(z) is_alpha_inf <- abs(alpha) == Inf delta[is_alpha_inf] <- sign(alpha[is_alpha_inf]) out <- numeric(nz) for (k in seq_len(nz)) { if (is_alpha_inf[k]) { if (alpha[k] > 0) { out[k] <- 2 * (pnorm(pmax(z[k], 0)) - 0.5) } else { out[k] <- 1 - 2 * (0.5 - pnorm(pmin(z[k], 0))) } } else { S <- matrix(c(1, -delta[k], -delta[k], 1), 2, 2) out[k] <- 2 * mnormt::biv.nt.prob( 0, lower = rep(-Inf, 2), upper = c(z[k], 0), mean = c(0, 0), S = S ) } } pmin(1, pmax(0, out)) }) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname SkewNormal #' @export qskew_normal <- function(p, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, lower.tail = TRUE, log.p = FALSE, tol = 1e-8) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (log.p) { p <- exp(p) } if (!lower.tail) { p <- 1 - p } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega, p = p) out <- with(args, { # do it like sn::qsn na <- is.na(p) | (p < 0) | (p > 1) zero <- (p == 0) one <- (p == 1) p <- replace(p, (na | zero | one), 0.5) cum <- skew_normal_cumulants(0, 1, alpha, n = 4) g1 <- cum[, 3] / cum[, 2]^(3 / 2) g2 <- cum[, 4] / cum[, 2]^2 x <- qnorm(p) x <- x + (x^2 - 1) * g1 / 6 + x * (x^2 - 3) * g2 / 24 - x * (2 * x^2 - 5) * g1^2 / 36 x <- cum[, 1] + sqrt(cum[, 2]) * x px <- pskew_normal(x, xi = 0, omega = 1, alpha = alpha) max_err <- 1 while (max_err > tol) { x1 <- x - (px - p) / dskew_normal(x, xi = 0, omega = 1, alpha = alpha) x <- x1 px <- pskew_normal(x, xi = 0, omega = 1, alpha = alpha) max_err <- max(abs(px - p)) if (is.na(max_err)) { warning2("Approximation in 'qskew_normal' might have failed.") } } x <- replace(x, na, NA) x <- replace(x, zero, -Inf) x <- replace(x, one, Inf) as.numeric(xi + omega * x) }) out } #' @rdname SkewNormal #' @export rskew_normal <- function(n, mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } args <- cp2dp(mu, sigma, alpha, xi = xi, omega = omega) with(args, { # do it like sn::rsn z1 <- rnorm(n) z2 <- rnorm(n) id <- z2 > args$alpha * z1 z1[id] <- -z1[id] xi + omega * z1 }) } # convert skew-normal mixed-CP to DP parameterization # @return a data.frame containing all relevant parameters cp2dp <- function(mu = 0, sigma = 1, alpha = 0, xi = NULL, omega = NULL, ...) 
{ delta <- alpha / sqrt(1 + alpha^2) if (is.null(omega)) { omega <- sigma / sqrt(1 - 2 / pi * delta^2) } if (is.null(xi)) { xi <- mu - omega * delta * sqrt(2 / pi) } expand(dots = nlist(mu, sigma, alpha, xi, omega, delta, ...)) } # helper function for qskew_normal # code basis taken from sn::sn.cumulants # uses xi and omega rather than mu and sigma skew_normal_cumulants <- function(xi = 0, omega = 1, alpha = 0, n = 4) { cumulants_half_norm <- function(n) { n <- max(n, 2) n <- as.integer(2 * ceiling(n/2)) half.n <- as.integer(n/2) m <- 0:(half.n - 1) a <- sqrt(2/pi)/(gamma(m + 1) * 2^m * (2 * m + 1)) signs <- rep(c(1, -1), half.n)[seq_len(half.n)] a <- as.vector(rbind(signs * a, rep(0, half.n))) coeff <- rep(a[1], n) for (k in 2:n) { ind <- seq_len(k - 1) coeff[k] <- a[k] - sum(ind * coeff[ind] * a[rev(ind)]/k) } kappa <- coeff * gamma(seq_len(n) + 1) kappa[2] <- 1 + kappa[2] return(kappa) } args <- expand(dots = nlist(xi, omega, alpha)) with(args, { # do it like sn::sn.cumulants delta <- alpha / sqrt(1 + alpha^2) kv <- cumulants_half_norm(n) if (length(kv) > n) { kv <- kv[-(n + 1)] } kv[2] <- kv[2] - 1 kappa <- outer(delta, 1:n, "^") * matrix(rep(kv, length(xi)), ncol = n, byrow = TRUE) kappa[, 2] <- kappa[, 2] + 1 kappa <- kappa * outer(omega, 1:n, "^") kappa[, 1] <- kappa[, 1] + xi kappa }) } # CDF of the inverse gamma function pinvgamma <- function(q, shape, rate, lower.tail = TRUE, log.p = FALSE) { pgamma(1/q, shape, rate = rate, lower.tail = !lower.tail, log.p = log.p) } #' The von Mises Distribution #' #' Density, distribution function, and random generation for the #' von Mises distribution with location \code{mu}, and precision \code{kappa}. #' #' @name VonMises #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param kappa Vector of precision values. #' @param acc Accuracy of numerical approximations. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
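# Sanity check for the skew-normal helpers defined above: with skewness
# alpha = 0 the density reduces to a normal density with the same mean and
# standard deviation (the CDF check would additionally require the suggested
# mnormt package).
x <- seq(-2, 2, by = 0.5)
stopifnot(all.equal(dskew_normal(x, mu = 1, sigma = 2, alpha = 0),
                    dnorm(x, mean = 1, sd = 2)))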
#' #' @export dvon_mises <- function(x, mu, kappa, log = FALSE) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } # expects x in [-pi, pi] rather than [0, 2*pi] as CircStats::dvm be <- besselI(kappa, nu = 0, expon.scaled = TRUE) out <- -log(2 * pi * be) + kappa * (cos(x - mu) - 1) if (!log) { out <- exp(out) } out } #' @rdname VonMises #' @export pvon_mises <- function(q, mu, kappa, lower.tail = TRUE, log.p = FALSE, acc = 1e-20) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } pi <- base::pi pi2 <- 2 * pi q <- (q + pi) %% pi2 mu <- (mu + pi) %% pi2 args <- expand(q = q, mu = mu, kappa = kappa) q <- args$q mu <- args$mu kappa <- args$kappa rm(args) # code basis taken from CircStats::pvm but improved # considerably with respect to speed and stability rec_sum <- function(q, kappa, acc, sum = 0, i = 1) { # compute the sum of of besselI functions recursively term <- (besselI(kappa, nu = i) * sin(i * q)) / i sum <- sum + term rd <- abs(term) >= acc if (sum(rd)) { sum[rd] <- rec_sum( q[rd], kappa[rd], acc, sum = sum[rd], i = i + 1 ) } sum } .pvon_mises <- function(q, kappa, acc) { sum <- rec_sum(q, kappa, acc) q / pi2 + sum / (pi * besselI(kappa, nu = 0)) } out <- rep(NA, length(mu)) zero_mu <- mu == 0 if (sum(zero_mu)) { out[zero_mu] <- .pvon_mises(q[zero_mu], kappa[zero_mu], acc) } lq_mu <- q <= mu if (sum(lq_mu)) { upper <- (q[lq_mu] - mu[lq_mu]) %% pi2 upper[upper == 0] <- pi2 lower <- (-mu[lq_mu]) %% pi2 out[lq_mu] <- .pvon_mises(upper, kappa[lq_mu], acc) - .pvon_mises(lower, kappa[lq_mu], acc) } uq_mu <- q > mu if (sum(uq_mu)) { upper <- q[uq_mu] - mu[uq_mu] lower <- mu[uq_mu] %% pi2 out[uq_mu] <- .pvon_mises(upper, kappa[uq_mu], acc) + .pvon_mises(lower, kappa[uq_mu], acc) } if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname VonMises #' @export rvon_mises <- function(n, mu, kappa) { if (isTRUE(any(kappa < 0))) { stop2("kappa must be non-negative") } args <- expand(mu = mu, kappa = kappa, length = n) mu <- args$mu kappa <- args$kappa rm(args) pi <- base::pi mu <- mu + pi # code basis taken from CircStats::rvm but improved # considerably with respect to speed and stability rvon_mises_outer <- function(r, mu, kappa) { n <- length(r) U1 <- runif(n, 0, 1) z <- cos(pi * U1) f <- (1 + r * z) / (r + z) c <- kappa * (r - f) U2 <- runif(n, 0, 1) outer <- is.na(f) | is.infinite(f) | !(c * (2 - c) - U2 > 0 | log(c / U2) + 1 - c >= 0) inner <- !outer out <- rep(NA, n) if (sum(inner)) { out[inner] <- rvon_mises_inner(f[inner], mu[inner]) } if (sum(outer)) { # evaluate recursively until a valid sample is found out[outer] <- rvon_mises_outer(r[outer], mu[outer], kappa[outer]) } out } rvon_mises_inner <- function(f, mu) { n <- length(f) U3 <- runif(n, 0, 1) (sign(U3 - 0.5) * acos(f) + mu) %% (2 * pi) } a <- 1 + (1 + 4 * (kappa^2))^0.5 b <- (a - (2 * a)^0.5) / (2 * kappa) r <- (1 + b^2) / (2 * b) # indicates underflow due to kappa being close to zero is_uf <- is.na(r) | is.infinite(r) not_uf <- !is_uf out <- rep(NA, n) if (sum(is_uf)) { out[is_uf] <- runif(sum(is_uf), 0, 2 * pi) } if (sum(not_uf)) { out[not_uf] <- rvon_mises_outer(r[not_uf], mu[not_uf], kappa[not_uf]) } out - pi } #' The Exponentially Modified Gaussian Distribution #' #' Density, distribution function, and random generation #' for the exponentially modified Gaussian distribution with #' mean \code{mu} and standard deviation \code{sigma} of the gaussian #' component, as well as scale \code{beta} of the exponential #' component. 
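# Quick checks for the von Mises helpers defined above: the density integrates
# to 1 over the support [-pi, pi], and by symmetry the CDF at the mean is 0.5.
integrate(dvon_mises, lower = -pi, upper = pi, mu = 0, kappa = 2)$value  # ~1
pvon_mises(0, mu = 0, kappa = 2)  # ~0.5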
#' #' @name ExGaussian #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of means of the combined distribution. #' @param sigma Vector of standard deviations of the gaussian component. #' @param beta Vector of scales of the exponential component. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. #' #' @export dexgaussian <- function(x, mu, sigma, beta, log = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } args <- nlist(x, mu, sigma, beta) args <- do_call(expand, args) args$mu <- with(args, mu - beta) args$z <- with(args, x - mu - sigma^2 / beta) out <- with(args, -log(beta) - (z + sigma^2 / (2 * beta)) / beta + pnorm(z / sigma, log.p = TRUE) ) if (!log) { out <- exp(out) } out } #' @rdname ExGaussian #' @export pexgaussian <- function(q, mu, sigma, beta, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } args <- nlist(q, mu, sigma, beta) args <- do_call(expand, args) args$mu <- with(args, mu - beta) args$z <- with(args, q - mu - sigma^2 / beta) out <- with(args, pnorm((q - mu) / sigma) - pnorm(z / sigma) * exp(((mu + sigma^2 / beta)^2 - mu^2 - 2 * q * sigma^2 / beta) / (2 * sigma^2)) ) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname ExGaussian #' @export rexgaussian <- function(n, mu, sigma, beta) { if (isTRUE(any(sigma < 0))) { stop2("sigma must be non-negative.") } if (isTRUE(any(beta < 0))) { stop2("beta must be non-negative.") } mu <- mu - beta rnorm(n, mean = mu, sd = sigma) + rexp(n, rate = 1 / beta) } #' The Frechet Distribution #' #' Density, distribution function, quantile function and random generation #' for the Frechet distribution with location \code{loc}, scale \code{scale}, #' and shape \code{shape}. #' #' @name Frechet #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param loc Vector of locations. #' @param scale Vector of scales. #' @param shape Vector of shapes. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
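# Sketch for the ex-Gaussian helpers defined above: in this parameterization
# mu is the mean of the combined distribution, so simulated draws should
# average to mu.
set.seed(2021)
y <- rexgaussian(1e5, mu = 3, sigma = 1, beta = 2)
mean(y)  # close to 3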
#' #' @export dfrechet <- function(x, loc = 0, scale = 1, shape = 1, log = FALSE) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } x <- (x - loc) / scale args <- nlist(x, loc, scale, shape) args <- do_call(expand, args) out <- with(args, log(shape / scale) - (1 + shape) * log(x) - x^(-shape) ) if (!log) { out <- exp(out) } out } #' @rdname Frechet #' @export pfrechet <- function(q, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } q <- pmax((q - loc) / scale, 0) out <- exp(-q^(-shape)) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname Frechet #' @export qfrechet <- function(p, loc = 0, scale = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(p <= 0)) || isTRUE(any(p >= 1))) { stop("'p' must contain probabilities in (0,1)") } if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } if (log.p) { p <- exp(p) } if (!lower.tail) { p <- 1 - p } loc + scale * (-log(p))^(-1/shape) } #' @rdname Frechet #' @export rfrechet <- function(n, loc = 0, scale = 1, shape = 1) { if (isTRUE(any(scale <= 0))) { stop2("Argument 'scale' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } loc + scale * rexp(n)^(-1 / shape) } #' The Shifted Log Normal Distribution #' #' Density, distribution function, quantile function and random generation #' for the shifted log normal distribution with mean \code{meanlog}, #' standard deviation \code{sdlog}, and shift parameter \code{shift}. #' #' @name Shifted_Lognormal #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param meanlog Vector of means. #' @param sdlog Vector of standard deviations. #' @param shift Vector of shifts. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. #' #' @export dshifted_lnorm <- function(x, meanlog = 0, sdlog = 1, shift = 0, log = FALSE) { args <- nlist(dist = "lnorm", x, shift, meanlog, sdlog, log) do_call(dshifted, args) } #' @rdname Shifted_Lognormal #' @export pshifted_lnorm <- function(q, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE) { args <- nlist(dist = "lnorm", q, shift, meanlog, sdlog, lower.tail, log.p) do_call(pshifted, args) } #' @rdname Shifted_Lognormal #' @export qshifted_lnorm <- function(p, meanlog = 0, sdlog = 1, shift = 0, lower.tail = TRUE, log.p = FALSE) { args <- nlist(dist = "lnorm", p, shift, meanlog, sdlog, lower.tail, log.p) do_call(qshifted, args) } #' @rdname Shifted_Lognormal #' @export rshifted_lnorm <- function(n, meanlog = 0, sdlog = 1, shift = 0) { args <- nlist(dist = "lnorm", n, shift, meanlog, sdlog) do_call(rshifted, args) } #' The Inverse Gaussian Distribution #' #' Density, distribution function, and random generation #' for the inverse Gaussian distribution with location \code{mu}, #' and shape \code{shape}. #' #' @name InvGaussian #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of locations. #' @param shape Vector of shapes. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. 
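# Sketch for the Frechet helpers defined above: qfrechet() inverts pfrechet(),
# and the median has the closed form loc + scale * log(2)^(-1 / shape).
p <- c(0.1, 0.5, 0.9)
q <- qfrechet(p, loc = 1, scale = 2, shape = 3)
stopifnot(all.equal(pfrechet(q, loc = 1, scale = 2, shape = 3), p))
stopifnot(all.equal(qfrechet(0.5, loc = 1, scale = 2, shape = 3),
                    1 + 2 * log(2)^(-1 / 3)))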
#' #' @export dinv_gaussian <- function(x, mu = 1, shape = 1, log = FALSE) { if (isTRUE(any(mu <= 0))) { stop2("Argument 'mu' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } args <- nlist(x, mu, shape) args <- do_call(expand, args) out <- with(args, 0.5 * log(shape / (2 * pi)) - 1.5 * log(x) - 0.5 * shape * (x - mu)^2 / (x * mu^2) ) if (!log) { out <- exp(out) } out } #' @rdname InvGaussian #' @export pinv_gaussian <- function(q, mu = 1, shape = 1, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(mu <= 0))) { stop2("Argument 'mu' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } args <- nlist(q, mu, shape) args <- do_call(expand, args) out <- with(args, pnorm(sqrt(shape / q) * (q / mu - 1)) + exp(2 * shape / mu) * pnorm(-sqrt(shape / q) * (q / mu + 1)) ) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname InvGaussian #' @export rinv_gaussian <- function(n, mu = 1, shape = 1) { # create random numbers for the inverse gaussian distribution # Args: # Args: see dinv_gaussian if (isTRUE(any(mu <= 0))) { stop2("Argument 'mu' must be positive.") } if (isTRUE(any(shape <= 0))) { stop2("Argument 'shape' must be positive.") } args <- nlist(mu, shape, length = n) args <- do_call(expand, args) # algorithm from wikipedia args$y <- rnorm(n)^2 args$x <- with(args, mu + (mu^2 * y) / (2 * shape) - mu / (2 * shape) * sqrt(4 * mu * shape * y + mu^2 * y^2) ) args$z <- runif(n) with(args, ifelse(z <= mu / (mu + x), x, mu^2 / x)) } #' The Generalized Extreme Value Distribution #' #' Density, distribution function, and random generation #' for the generalized extreme value distribution with #' location \code{mu}, scale \code{sigma} and shape \code{xi}. #' #' @name GenExtremeValue #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of locations. #' @param sigma Vector of scales. #' @param xi Vector of shapes. #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. #' #' @export dgen_extreme_value <- function(x, mu = 0, sigma = 1, xi = 0, log = FALSE) { if (isTRUE(any(sigma <= 0))) { stop2("sigma bust be positive.") } x <- (x - mu) / sigma args <- nlist(x, mu, sigma, xi) args <- do_call(expand, args) args$t <- with(args, 1 + xi * x) out <- with(args, ifelse( xi == 0, -log(sigma) - x - exp(-x), -log(sigma) - (1 + 1 / xi) * log(t) - t^(-1 / xi) )) if (!log) { out <- exp(out) } out } #' @rdname GenExtremeValue #' @export pgen_extreme_value <- function(q, mu = 0, sigma = 1, xi = 0, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(sigma <= 0))) { stop2("sigma bust be positive.") } q <- (q - mu) / sigma args <- nlist(q, mu, sigma, xi) args <- do_call(expand, args) out <- with(args, ifelse( xi == 0, exp(-exp(-q)), exp(-(1 + xi * q)^(-1 / xi)) )) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname GenExtremeValue #' @export rgen_extreme_value <- function(n, mu = 0, sigma = 1, xi = 0) { if (isTRUE(any(sigma <= 0))) { stop2("sigma bust be positive.") } args <- nlist(mu, sigma, xi, length = n) args <- do_call(expand, args) with(args, ifelse( xi == 0, mu - sigma * log(rexp(n)), mu + sigma * (rexp(n)^(-xi) - 1) / xi )) } #' The Asymmetric Laplace Distribution #' #' Density, distribution function, quantile function and random generation #' for the asymmetric Laplace distribution with location \code{mu}, #' scale \code{sigma} and asymmetry parameter \code{quantile}. 
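# Sketch for the inverse Gaussian helpers defined above: the distribution has
# mean mu, and pinv_gaussian() should agree with the empirical CDF of draws
# from rinv_gaussian().
set.seed(1234)
y <- rinv_gaussian(1e5, mu = 2, shape = 4)
mean(y)                              # close to 2
mean(y <= 2)                         # empirical CDF at mu
pinv_gaussian(2, mu = 2, shape = 4)  # close to the empirical value above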
#' #' @name AsymLaplace #' #' @inheritParams StudentT #' @param x,q Vector of quantiles. #' @param mu Vector of locations. #' @param sigma Vector of scales. #' @param quantile Asymmetry parameter corresponding to quantiles #' in quantile regression (hence the name). #' #' @details See \code{vignette("brms_families")} for details #' on the parameterization. #' #' @export dasym_laplace <- function(x, mu = 0, sigma = 1, quantile = 0.5, log = FALSE) { out <- ifelse(x < mu, yes = (quantile * (1 - quantile) / sigma) * exp((1 - quantile) * (x - mu) / sigma), no = (quantile * (1 - quantile) / sigma) * exp(-quantile * (x - mu) / sigma) ) if (log) { out <- log(out) } out } #' @rdname AsymLaplace #' @export pasym_laplace <- function(q, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE) { out <- ifelse(q < mu, yes = quantile * exp((1 - quantile) * (q - mu) / sigma), no = 1 - (1 - quantile) * exp(-quantile * (q - mu) / sigma) ) if (!lower.tail) { out <- 1 - out } if (log.p) { out <- log(out) } out } #' @rdname AsymLaplace #' @export qasym_laplace <- function(p, mu = 0, sigma = 1, quantile = 0.5, lower.tail = TRUE, log.p = FALSE) { if (log.p) { p <- exp(p) } if (!lower.tail) { p <- 1 - p } if (length(quantile) == 1L) { quantile <- rep(quantile, length(mu)) } ifelse(p < quantile, yes = mu + ((sigma * log(p / quantile)) / (1 - quantile)), no = mu - ((sigma * log((1 - p) / (1 - quantile))) / quantile) ) } #' @rdname AsymLaplace #' @export rasym_laplace <- function(n, mu = 0, sigma = 1, quantile = 0.5) { u <- runif(n) qasym_laplace(u, mu = mu, sigma = sigma, quantile = quantile) } # The Discrete Weibull Distribution # # Density, distribution function, quantile function and random generation # for the discrete Weibull distribution with location \code{mu} and # shape \code{shape}. # # @name DiscreteWeibull # # @inheritParams StudentT # @param mu Location parameter in the unit interval. # @param shape Positive shape parameter. # # @details See \code{vignette("brms_families")} for details # on the parameterization. 
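# Sketch for the asymmetric Laplace helpers defined above: the CDF evaluated
# at mu equals the 'quantile' parameter, which is what makes this family
# useful for quantile regression, and qasym_laplace() inverts pasym_laplace().
stopifnot(all.equal(pasym_laplace(0, mu = 0, sigma = 1, quantile = 0.25), 0.25))
p <- c(0.1, 0.5, 0.9)
q <- qasym_laplace(p, mu = 0, sigma = 1, quantile = 0.25)
stopifnot(all.equal(pasym_laplace(q, mu = 0, sigma = 1, quantile = 0.25), p))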
# # @export ddiscrete_weibull <- function(x, mu, shape, log = FALSE) { if (isTRUE(any(mu < 0 | mu > 1))) { stop2("mu bust be between 0 and 1.") } if (isTRUE(any(shape <= 0))) { stop2("shape bust be positive.") } x <- round(x) out <- mu^x^shape - mu^(x + 1)^shape out[x < 0] <- 0 if (log) { out <- log(out) } out } # @rdname DiscreteWeibull # @export pdiscrete_weibull <- function(x, mu, shape, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(mu < 0 | mu > 1))) { stop2("mu bust be between 0 and 1.") } if (isTRUE(any(shape <= 0))) { stop2("shape bust be positive.") } x <- round(x) if (lower.tail) { out <- 1 - mu^(x + 1)^shape out[x < 0] <- 0 } else { out <- mu^(x + 1)^shape out[x < 0] <- 1 } if (log.p) { out <- log(out) } out } # @rdname DiscreteWeibull # @export qdiscrete_weibull <- function(p, mu, shape, lower.tail = TRUE, log.p = FALSE) { if (isTRUE(any(mu < 0 | mu > 1))) { stop2("mu bust be between 0 and 1.") } if (isTRUE(any(shape <= 0))) { stop2("shape bust be positive.") } if (log.p) { p <- exp(p) } if (!lower.tail) { p <- 1 - p } ceiling((log(1 - p) / log(mu))^(1 / shape) - 1) } # @rdname DiscreteWeibull # @export rdiscrete_weibull <- function(n, mu, shape) { u <- runif(n, 0, 1) qdiscrete_weibull(u, mu, shape) } # mean of the discrete weibull distribution # @param mu location parameter # @param shape shape parameter # @param M maximal evaluated element of the series # @param thres threshold for new elements at which to stop evaluation mean_discrete_weibull <- function(mu, shape, M = 1000, thres = 0.001) { opt_M <- ceiling(max((log(thres) / log(mu))^(1 / shape))) if (opt_M <= M) { M <- opt_M } else { # avoid the loop below running too slow warning2( "Approximating the mean of the 'discrete_weibull' ", "distribution failed and results be inaccurate." ) } out <- 0 for (y in seq_len(M)) { out <- out + mu^y^shape } # approximation of the residual series (see Englehart & Li, 2011) # returns unreasonably large values presumably due to numerical issues out } # PDF of the COM-Poisson distribution # com_poisson in brms uses the mode parameterization dcom_poisson <- function(x, mu, shape, log = FALSE) { x <- round(x) log_mu <- log(mu) log_Z <- log_Z_com_poisson(log_mu, shape) out <- shape * (x * log_mu - lgamma(x + 1)) - log_Z if (!log) { out <- exp(out) } out } # random numbers from the COM-Poisson distribution rcom_poisson <- function(n, mu, shape, M = 10000) { n <- check_n_rdist(n, mu, shape) M <- as.integer(as_one_numeric(M)) log_mu <- log(mu) # approximating log_Z may yield too large random draws log_Z <- log_Z_com_poisson(log_mu, shape, approx = FALSE) u <- runif(n, 0, 1) cdf <- exp(-log_Z) lfac <- 0 y <- 0 out <- rep(0, n) not_found <- cdf < u while (any(not_found) && y <= M) { y <- y + 1 out[not_found] <- y lfac <- lfac + log(y) cdf <- cdf + exp(shape * (y * log_mu - lfac) - log_Z) not_found <- cdf < u } if (any(not_found)) { out[not_found] <- NA nfailed <- sum(not_found) warning2( "Drawing random numbers from the 'com_poisson' ", "distribution failed in ", nfailed, " cases." 
) } out } # CDF of the COM-Poisson distribution pcom_poisson <- function(x, mu, shape, lower.tail = TRUE, log.p = FALSE) { x <- round(x) args <- expand(x = x, mu = mu, shape = shape) x <- args$x mu <- args$mu shape <- args$shape log_mu <- log(mu) log_Z <- log_Z_com_poisson(log_mu, shape) out <- rep(0, length(x)) dim(out) <- attributes(args)$max_dim out[x > 0] <- log1p_exp(shape * log_mu) k <- 2 lfac <- 0 while (any(x >= k)) { lfac <- lfac + log(k) term <- shape * (k * log_mu - lfac) out[x >= k] <- log_sum_exp(out[x >= k], term) k <- k + 1 } out <- out - log_Z out[out > 0] <- 0 if (!lower.tail) { out <- log1m_exp(out) } if (!log.p) { out <- exp(out) } out } # log normalizing constant of the COM Poisson distribution # @param log_mu log location parameter # @param shape shape parameter # @param M maximal evaluated element of the series # @param thres threshold for new elements at which to stop evaluation # @param approx use a closed form approximation of the mean if appropriate? log_Z_com_poisson <- function(log_mu, shape, M = 10000, thres = 1e-16, approx = TRUE) { if (isTRUE(any(shape <= 0))) { stop2("'shape' must be positive.") } if (isTRUE(any(shape == Inf))) { stop2("'shape' must be finite.") } approx <- as_one_logical(approx) args <- expand(log_mu = log_mu, shape = shape) log_mu <- args$log_mu shape <- args$shape out <- rep(NA, length(log_mu)) dim(out) <- attributes(args)$max_dim use_poisson <- shape == 1 if (any(use_poisson)) { # shape == 1 implies the poisson distribution out[use_poisson] <- exp(log_mu[use_poisson]) } if (approx) { # use a closed form approximation if appropriate use_approx <- log_mu * shape >= log(1.5) & log_mu >= log(1.5) if (any(use_approx)) { out[use_approx] <- log_Z_com_poisson_approx( log_mu[use_approx], shape[use_approx] ) } } use_exact <- is.na(out) if (any(use_exact)) { # direct computation of the truncated series M <- as.integer(as_one_numeric(M)) thres <- as_one_numeric(thres) log_thres <- log(thres) log_mu <- log_mu[use_exact] shape <- shape[use_exact] # first 2 terms of the series out_exact <- log1p_exp(shape * log_mu) lfac <- 0 k <- 2 converged <- FALSE while (!converged && k <= M) { lfac <- lfac + log(k) term <- shape * (k * log_mu - lfac) out_exact <- log_sum_exp(out_exact, term) converged <- all(term <= log_thres) k <- k + 1 } out[use_exact] <- out_exact if (!converged) { warning2( "Approximating the normalizing constant of the 'com_poisson' ", "distribution failed and results may be inaccurate." ) } } out } # approximate the log normalizing constant of the COM Poisson distribution # based on doi:10.1007/s10463-017-0629-6 log_Z_com_poisson_approx <- function(log_mu, shape) { shape_mu <- shape * exp(log_mu) shape2 <- shape^2 # first 4 terms of the residual series log_sum_resid <- log( 1 + shape_mu^(-1) * (shape2 - 1) / 24 + shape_mu^(-2) * (shape2 - 1) / 1152 * (shape2 + 23) + shape_mu^(-3) * (shape2 - 1) / 414720 * (5 * shape2^2 - 298 * shape2 + 11237) ) shape_mu + log_sum_resid - ((log(2 * pi) + log_mu) * (shape - 1) / 2 + log(shape) / 2) } # compute the log mean of the COM Poisson distribution # @param mu location parameter # @param shape shape parameter # @param M maximal evaluated element of the series # @param thres threshold for new elements at which to stop evaluation # @param approx use a closed form approximation of the mean if appropriate? 
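# Sketch for the (internal, unexported) COM-Poisson helpers above: with
# shape = 1 the distribution reduces to the Poisson distribution, so the
# density should match dpois(). Outside the package these helpers would need
# the brms::: prefix.
x <- 0:5
dcom_poisson(x, mu = 3, shape = 1)
dpois(x, lambda = 3)  # matches the previous line when shape = 1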
mean_com_poisson <- function(mu, shape, M = 10000, thres = 1e-16, approx = TRUE) { if (isTRUE(any(shape <= 0))) { stop2("'shape' must be positive.") } if (isTRUE(any(shape == Inf))) { stop2("'shape' must be finite.") } approx <- as_one_logical(approx) args <- expand(mu = mu, shape = shape) mu <- args$mu shape <- args$shape out <- rep(NA, length(mu)) dim(out) <- attributes(args)$max_dim use_poisson <- shape == 1 if (any(use_poisson)) { # shape == 1 implies the poisson distribution out[use_poisson] <- mu[use_poisson] } if (approx) { # use a closed form approximation if appropriate use_approx <- mu^shape >= 1.5 & mu >= 1.5 if (any(use_approx)) { out[use_approx] <- mean_com_poisson_approx( mu[use_approx], shape[use_approx] ) } } use_exact <- is.na(out) if (any(use_exact)) { # direct computation of the truncated series M <- as.integer(as_one_numeric(M)) thres <- as_one_numeric(thres) log_thres <- log(thres) mu <- mu[use_exact] shape <- shape[use_exact] log_mu <- log(mu) # first 2 terms of the series log_num <- shape * log_mu # numerator log_Z <- log1p_exp(shape * log_mu) # denominator lfac <- 0 k <- 2 converged <- FALSE while (!converged && k <= M) { log_k <- log(k) lfac <- lfac + log_k term <- shape * (k * log_mu - lfac) log_num <- log_sum_exp(log_num, log_k + term) log_Z <- log_sum_exp(log_Z, term) converged <- all(term <= log_thres) k <- k + 1 } if (!converged) { warning2( "Approximating the mean of the 'com_poisson' ", "distribution failed and results may be inaccurate." ) } out[use_exact] <- exp(log_num - log_Z) } out } # approximate the mean of COM-Poisson distribution # based on doi:10.1007/s10463-017-0629-6 mean_com_poisson_approx <- function(mu, shape) { term <- 1 - (shape - 1) / (2 * shape) * mu^(-1) - (shape^2 - 1) / (24 * shape^2) * mu^(-2) - (shape^2 - 1) / (24 * shape^3) * mu^(-3) mu * term } #' The Dirichlet Distribution #' #' Density function and random number generation for the dirichlet #' distribution with shape parameter vector \code{alpha}. #' #' @name Dirichlet #' #' @inheritParams StudentT #' @param x Matrix of quantiles. Each row corresponds to one probability vector. #' @param alpha Matrix of positive shape parameters. Each row corresponds to one #' probability vector. #' #' @details See \code{vignette("brms_families")} for details on the #' parameterization. 
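# Sketch comparing the (internal) COM-Poisson mean helper defined above with
# simulation: the series/approximation based mean_com_poisson() should be
# close to the average of draws from rcom_poisson().
set.seed(99)
mean_com_poisson(mu = 3, shape = 1.5)
mean(rcom_poisson(1e4, mu = 3, shape = 1.5))  # close to the value above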
#' #' @export ddirichlet <- function(x, alpha, log = FALSE) { log <- as_one_logical(log) if (!is.matrix(x)) { x <- matrix(x, nrow = 1) } if (!is.matrix(alpha)) { alpha <- matrix(alpha, nrow(x), length(alpha), byrow = TRUE) } if (nrow(x) == 1L && nrow(alpha) > 1L) { x <- repl(x, nrow(alpha)) x <- do_call(rbind, x) } else if (nrow(x) > 1L && nrow(alpha) == 1L) { alpha <- repl(alpha, nrow(x)) alpha <- do_call(rbind, alpha) } if (isTRUE(any(x < 0))) { stop2("x must be non-negative.") } if (!is_equal(rowSums(x), rep(1, nrow(x)))) { stop2("x must sum to 1 per row.") } if (isTRUE(any(alpha <= 0))) { stop2("alpha must be positive.") } out <- lgamma(rowSums(alpha)) - rowSums(lgamma(alpha)) + rowSums((alpha - 1) * log(x)) if (!log) { out <- exp(out) } return(out) } #' @rdname Dirichlet #' @export rdirichlet <- function(n, alpha) { n <- as_one_numeric(n) if (!is.matrix(alpha)) { alpha <- matrix(alpha, nrow = 1) } if (prod(dim(alpha)) == 0) { stop2("alpha should be non-empty.") } if (isTRUE(any(alpha <= 0))) { stop2("alpha must be positive.") } if (n == 1) { n <- nrow(alpha) } if (n > nrow(alpha)) { alpha <- matrix(alpha, nrow = n, ncol = ncol(alpha), byrow = TRUE) } x <- matrix(rgamma(ncol(alpha) * n, alpha), ncol = ncol(alpha)) x / rowSums(x) } #' The Wiener Diffusion Model Distribution #' #' Density function and random generation for the Wiener #' diffusion model distribution with boundary separation \code{alpha}, #' non-decision time \code{tau}, bias \code{beta} and #' drift rate \code{delta}. #' #' @name Wiener #' #' @inheritParams StudentT #' @param alpha Boundary separation parameter. #' @param tau Non-decision time parameter. #' @param beta Bias parameter. #' @param delta Drift rate parameter. #' @param resp Response: \code{"upper"} or \code{"lower"}. #' If no character vector, it is coerced to logical #' where \code{TRUE} indicates \code{"upper"} and #' \code{FALSE} indicates \code{"lower"}. #' @param types Which types of responses to return? By default, #' return both the response times \code{"q"} and the dichotomous #' responses \code{"resp"}. If either \code{"q"} or \code{"resp"}, #' return only one of the two types. #' @param backend Name of the package to use as backend for the computations. #' Either \code{"Rwiener"} (the default) or \code{"rtdists"}. #' Can be set globally for the current \R session via the #' \code{"wiener_backend"} option (see \code{\link{options}}). #' #' @details #' These are wrappers around functions of the \pkg{RWiener} or \pkg{rtdists} #' package (depending on the chosen \code{backend}). See #' \code{vignette("brms_families")} for details on the parameterization. 
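# Sketch for the Dirichlet helpers defined above: every simulated probability
# vector sums to 1 and the column means approach alpha / sum(alpha).
set.seed(42)
alpha <- c(2, 3, 5)
draws <- rdirichlet(1000, alpha)
range(rowSums(draws))  # all 1 up to floating point error
colMeans(draws)        # close to c(0.2, 0.3, 0.5)
ddirichlet(c(0.2, 0.3, 0.5), alpha, log = TRUE)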
#' #' @seealso \code{\link[RWiener:wienerdist]{wienerdist}}, #' \code{\link[rtdists:Diffusion]{Diffusion}} #' #' @export dwiener <- function(x, alpha, tau, beta, delta, resp = 1, log = FALSE, backend = getOption("wiener_backend", "Rwiener")) { alpha <- as.numeric(alpha) tau <- as.numeric(tau) beta <- as.numeric(beta) delta <- as.numeric(delta) if (!is.character(resp)) { resp <- ifelse(resp, "upper", "lower") } log <- as_one_logical(log) backend <- match.arg(backend, c("Rwiener", "rtdists")) .dwiener <- paste0(".dwiener_", backend) args <- nlist(x, alpha, tau, beta, delta, resp) args <- as.list(do_call(expand, args)) args$log <- log do_call(.dwiener, args) } # dwiener using Rwiener as backend .dwiener_Rwiener <- function(x, alpha, tau, beta, delta, resp, log) { require_package("RWiener") .dwiener <- Vectorize( RWiener::dwiener, c("q", "alpha", "tau", "beta", "delta", "resp") ) args <- nlist(q = x, alpha, tau, beta, delta, resp, give_log = log) do_call(.dwiener, args) } # dwiener using rtdists as backend .dwiener_rtdists <- function(x, alpha, tau, beta, delta, resp, log) { require_package("rtdists") args <- list( rt = x, response = resp, a = alpha, t0 = tau, z = beta * alpha, v = delta ) out <- do_call(rtdists::ddiffusion, args) if (log) { out <- log(out) } out } #' @rdname Wiener #' @export rwiener <- function(n, alpha, tau, beta, delta, types = c("q", "resp"), backend = getOption("wiener_backend", "Rwiener")) { n <- as_one_numeric(n) alpha <- as.numeric(alpha) tau <- as.numeric(tau) beta <- as.numeric(beta) delta <- as.numeric(delta) types <- match.arg(types, several.ok = TRUE) backend <- match.arg(backend, c("Rwiener", "rtdists")) .rwiener <- paste0(".rwiener_", backend) args <- nlist(n, alpha, tau, beta, delta, types) do_call(.rwiener, args) } # rwiener using Rwiener as backend .rwiener_Rwiener <- function(n, alpha, tau, beta, delta, types) { require_package("RWiener") max_len <- max(lengths(list(alpha, tau, beta, delta))) if (max_len > 1L) { if (!n %in% c(1, max_len)) { stop2("Can only sample exactly once for each condition.") } n <- 1 } # helper function to return a numeric vector instead # of a data.frame with two columns as for RWiener::rwiener .rwiener_num <- function(n, alpha, tau, beta, delta, types) { out <- RWiener::rwiener(n, alpha, tau, beta, delta) out$resp <- ifelse(out$resp == "upper", 1, 0) if (length(types) == 1L) { out <- out[[types]] } out } # vectorized version of .rwiener_num .rwiener <- function(...) { fun <- Vectorize( .rwiener_num, c("alpha", "tau", "beta", "delta"), SIMPLIFY = FALSE ) do_call(rbind, fun(...)) } args <- nlist(n, alpha, tau, beta, delta, types) do_call(.rwiener, args) } # rwiener using rtdists as backend .rwiener_rtdists <- function(n, alpha, tau, beta, delta, types) { require_package("rtdists") max_len <- max(lengths(list(alpha, tau, beta, delta))) if (max_len > 1L) { if (!n %in% c(1, max_len)) { stop2("Can only sample exactly once for each condition.") } n <- max_len } out <- rtdists::rdiffusion( n, a = alpha, t0 = tau, z = beta * alpha, v = delta ) # TODO: use column names of rtdists in the output? names(out)[names(out) == "rt"] <- "q" names(out)[names(out) == "response"] <- "resp" out$resp <- ifelse(out$resp == "upper", 1, 0) if (length(types) == 1L) { out <- out[[types]] } out } # density of the cox proportional hazards model # @param x currently ignored as the information is passed # via 'bhaz' and 'cbhaz'. 
Before exporting the cox distribution # functions, this needs to be refactored so that x is actually used # @param mu positive location parameter # @param bhaz baseline hazard # @param cbhaz cumulative baseline hazard dcox <- function(x, mu, bhaz, cbhaz, log = FALSE) { out <- hcox(x, mu, bhaz, cbhaz, log = TRUE) + pcox(x, mu, bhaz, cbhaz, lower.tail = FALSE, log.p = TRUE) if (!log) { out <- exp(out) } out } # hazard function of the cox model hcox <- function(x, mu, bhaz, cbhaz, log = FALSE) { out <- log(bhaz) + log(mu) if (!log) { out <- exp(out) } out } # distribution function of the cox model pcox <- function(q, mu, bhaz, cbhaz, lower.tail = TRUE, log.p = FALSE) { log_surv <- -cbhaz * mu if (lower.tail) { if (log.p) { out <- log1m_exp(log_surv) } else { out <- 1 - exp(log_surv) } } else { if (log.p) { out <- log_surv } else { out <- exp(log_surv) } } out } #' Zero-Inflated Distributions #' #' Density and distribution functions for zero-inflated distributions. #' #' @name ZeroInflated #' #' @inheritParams StudentT #' @param zi zero-inflation probability #' @param mu,lambda location parameter #' @param shape,shape1,shape2 shape parameter #' @param size number of trials #' @param prob probability of success on each trial #' #' @details #' The density of a zero-inflated distribution can be specified as follows. #' If \eqn{x = 0} set \eqn{f(x) = \theta + (1 - \theta) * g(0)}. #' Else set \eqn{f(x) = (1 - \theta) * g(x)}, #' where \eqn{g(x)} is the density of the non-zero-inflated part. NULL #' @rdname ZeroInflated #' @export dzero_inflated_poisson <- function(x, lambda, zi, log = FALSE) { pars <- nlist(lambda) .dzero_inflated(x, "pois", zi, pars, log) } #' @rdname ZeroInflated #' @export pzero_inflated_poisson <- function(q, lambda, zi, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(lambda) .pzero_inflated(q, "pois", zi, pars, lower.tail, log.p) } #' @rdname ZeroInflated #' @export dzero_inflated_negbinomial <- function(x, mu, shape, zi, log = FALSE) { pars <- nlist(mu, size = shape) .dzero_inflated(x, "nbinom", zi, pars, log) } #' @rdname ZeroInflated #' @export pzero_inflated_negbinomial <- function(q, mu, shape, zi, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(mu, size = shape) .pzero_inflated(q, "nbinom", zi, pars, lower.tail, log.p) } #' @rdname ZeroInflated #' @export dzero_inflated_binomial <- function(x, size, prob, zi, log = FALSE) { pars <- nlist(size, prob) .dzero_inflated(x, "binom", zi, pars, log) } #' @rdname ZeroInflated #' @export pzero_inflated_binomial <- function(q, size, prob, zi, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(size, prob) .pzero_inflated(q, "binom", zi, pars, lower.tail, log.p) } #' @rdname ZeroInflated #' @export dzero_inflated_beta <- function(x, shape1, shape2, zi, log = FALSE) { pars <- nlist(shape1, shape2) # zi_beta is technically a hurdle model .dhurdle(x, "beta", zi, pars, log, type = "real") } #' @rdname ZeroInflated #' @export pzero_inflated_beta <- function(q, shape1, shape2, zi, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(shape1, shape2) # zi_beta is technically a hurdle model .phurdle(q, "beta", zi, pars, lower.tail, log.p, type = "real") } # @rdname ZeroInflated # @export dzero_inflated_asym_laplace <- function(x, mu, sigma, quantile, zi, log = FALSE) { pars <- nlist(mu, sigma, quantile) # zi_asym_laplace is technically a hurdle model .dhurdle(x, "asym_laplace", zi, pars, log, type = "real") } # @rdname ZeroInflated # @export pzero_inflated_asym_laplace <- function(q, mu, sigma, quantile, zi, lower.tail = TRUE, log.p = FALSE) 
{ pars <- nlist(mu, sigma, quantile) # zi_asym_laplace is technically a hurdle model .phurdle(q, "asym_laplace", zi, pars, lower.tail, log.p, type = "real", lb = -Inf, ub = Inf) } # density of a zero-inflated distribution # @param dist name of the distribution # @param zi bernoulli zero-inflated parameter # @param pars list of parameters passed to pdf .dzero_inflated <- function(x, dist, zi, pars, log) { stopifnot(is.list(pars)) dist <- as_one_character(dist) log <- as_one_logical(log) args <- expand(dots = c(nlist(x, zi), pars)) x <- args$x zi <- args$zi pars <- args[names(pars)] pdf <- paste0("d", dist) out <- ifelse(x == 0, log(zi + (1 - zi) * do_call(pdf, c(0, pars))), log(1 - zi) + do_call(pdf, c(list(x), pars, log = TRUE)) ) if (!log) { out <- exp(out) } out } # CDF of a zero-inflated distribution # @param dist name of the distribution # @param zi bernoulli zero-inflated parameter # @param pars list of parameters passed to pdf # @param lb lower bound of the conditional distribution # @param ub upper bound of the conditional distribution .pzero_inflated <- function(q, dist, zi, pars, lower.tail, log.p, lb = 0, ub = Inf) { stopifnot(is.list(pars)) dist <- as_one_character(dist) lower.tail <- as_one_logical(lower.tail) log.p <- as_one_logical(log.p) lb <- as_one_numeric(lb) ub <- as_one_numeric(ub) args <- expand(dots = c(nlist(q, zi), pars)) q <- args$q zi <- args$zi pars <- args[names(pars)] cdf <- paste0("p", dist) # compute log CCDF values out <- log(1 - zi) + do_call(cdf, c(list(q), pars, lower.tail = FALSE, log.p = TRUE)) # take the limits of the distribution into account out <- ifelse(q < lb, 0, out) out <- ifelse(q > ub, -Inf, out) if (lower.tail) { out <- 1 - exp(out) if (log.p) { out <- log(out) } } else { if (!log.p) { out <- exp(out) } } out } #' Hurdle Distributions #' #' Density and distribution functions for hurdle distributions. #' #' @name Hurdle #' #' @inheritParams StudentT #' @param hu hurdle probability #' @param mu,lambda location parameter #' @param shape shape parameter #' @param sigma,scale scale parameter #' #' @details #' The density of a hurdle distribution can be specified as follows. #' If \eqn{x = 0} set \eqn{f(x) = \theta}. Else set #' \eqn{f(x) = (1 - \theta) * g(x) / (1 - G(0))} #' where \eqn{g(x)} and \eqn{G(x)} are the density and distribution #' function of the non-hurdle part, respectively. 
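#' @examples
#' # Added illustration (a minimal sketch, not part of the original docs):
#' # evaluating the hurdle Poisson density by hand via the formula above;
#' # for the Poisson, G(0) equals dpois(0, lambda).
#' hu <- 0.3
#' lambda <- 2
#' x <- c(0, 1, 3)
#' ifelse(x == 0, hu,
#'   (1 - hu) * dpois(x, lambda) / (1 - dpois(0, lambda)))
#' # matches the built-in density
#' dhurdle_poisson(x, lambda, hu = hu)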
NULL #' @rdname Hurdle #' @export dhurdle_poisson <- function(x, lambda, hu, log = FALSE) { pars <- nlist(lambda) .dhurdle(x, "pois", hu, pars, log, type = "int") } #' @rdname Hurdle #' @export phurdle_poisson <- function(q, lambda, hu, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(lambda) .phurdle(q, "pois", hu, pars, lower.tail, log.p, type = "int") } #' @rdname Hurdle #' @export dhurdle_negbinomial <- function(x, mu, shape, hu, log = FALSE) { pars <- nlist(mu, size = shape) .dhurdle(x, "nbinom", hu, pars, log, type = "int") } #' @rdname Hurdle #' @export phurdle_negbinomial <- function(q, mu, shape, hu, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(mu, size = shape) .phurdle(q, "nbinom", hu, pars, lower.tail, log.p, type = "int") } #' @rdname Hurdle #' @export dhurdle_gamma <- function(x, shape, scale, hu, log = FALSE) { pars <- nlist(shape, scale) .dhurdle(x, "gamma", hu, pars, log, type = "real") } #' @rdname Hurdle #' @export phurdle_gamma <- function(q, shape, scale, hu, lower.tail = TRUE, log.p = FALSE) { pars <- nlist(shape, scale) .phurdle(q, "gamma", hu, pars, lower.tail, log.p, type = "real") } #' @rdname Hurdle #' @export dhurdle_lognormal <- function(x, mu, sigma, hu, log = FALSE) { pars <- list(meanlog = mu, sdlog = sigma) .dhurdle(x, "lnorm", hu, pars, log, type = "real") } #' @rdname Hurdle #' @export phurdle_lognormal <- function(q, mu, sigma, hu, lower.tail = TRUE, log.p = FALSE) { pars <- list(meanlog = mu, sdlog = sigma) .phurdle(q, "lnorm", hu, pars, lower.tail, log.p, type = "real") } # density of a hurdle distribution # @param dist name of the distribution # @param hu bernoulli hurdle parameter # @param pars list of parameters passed to pdf # @param type support of distribution (int or real) .dhurdle <- function(x, dist, hu, pars, log, type) { stopifnot(is.list(pars)) dist <- as_one_character(dist) log <- as_one_logical(log) type <- match.arg(type, c("int", "real")) args <- expand(dots = c(nlist(x, hu), pars)) x <- args$x hu <- args$hu pars <- args[names(pars)] pdf <- paste0("d", dist) if (type == "int") { lccdf0 <- log(1 - do_call(pdf, c(0, pars))) } else { lccdf0 <- 0 } out <- ifelse(x == 0, log(hu), log(1 - hu) + do_call(pdf, c(list(x), pars, log = TRUE)) - lccdf0 ) if (!log) { out <- exp(out) } out } # CDF of a hurdle distribution # @param dist name of the distribution # @param hu bernoulli hurdle parameter # @param pars list of parameters passed to pdf # @param type support of distribution (int or real) # @param lb lower bound of the conditional distribution # @param ub upper bound of the conditional distribution .phurdle <- function(q, dist, hu, pars, lower.tail, log.p, type, lb = 0, ub = Inf) { stopifnot(is.list(pars)) dist <- as_one_character(dist) lower.tail <- as_one_logical(lower.tail) log.p <- as_one_logical(log.p) type <- match.arg(type, c("int", "real")) lb <- as_one_numeric(lb) ub <- as_one_numeric(ub) args <- expand(dots = c(nlist(q, hu), pars)) q <- args$q hu <- args$hu pars <- args[names(pars)] cdf <- paste0("p", dist) # compute log CCDF values out <- log(1 - hu) + do_call(cdf, c(list(q), pars, lower.tail = FALSE, log.p = TRUE)) if (type == "int") { pdf <- paste0("d", dist) out <- out - log(1 - do_call(pdf, c(0, pars))) } out <- ifelse(q < 0, log_sum_exp(out, log(hu)), out) # take the limits of the distribution into account out <- ifelse(q < lb, 0, out) out <- ifelse(q > ub, -Inf, out) if (lower.tail) { out <- 1 - exp(out) if (log.p) { out <- log(out) } } else { if (!log.p) { out <- exp(out) } } out } # density of the categorical 
distribution with the softmax transform # @param x positive integers not greater than ncat # @param eta the linear predictor (of length or ncol ncat) # @param log return values on the log scale? dcategorical <- function(x, eta, log = FALSE) { if (is.null(dim(eta))) { eta <- matrix(eta, nrow = 1) } if (length(dim(eta)) != 2L) { stop2("eta must be a numeric vector or matrix.") } out <- inv_link_categorical(eta, log = log) out[, x, drop = FALSE] } # generic inverse link function for the categorical family # # @param x Matrix (S x `ncat` or S x `ncat - 1` (depending on # `insert_refcat_fam`), with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) with values of `eta` for # one observation (see dcategorical()) or an array (S x N x `ncat` or S x N x # `ncat - 1` (depending on `insert_refcat_fam`)) containing the same values as # the matrix just described, but for N observations. # @param insert_refcat_fam Either NULL or an object of class "brmsfamily". If # NULL, `x` is not modified at all. If an object of class "brmsfamily", then # insert_refcat() is used to insert values for the reference category into # `x`. # @param log Logical (length 1) indicating whether to log the return value. # # @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the inverse-link function applied to # `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same # values as the matrix just described, but for N observations. inv_link_categorical <- function(x, insert_refcat_fam = NULL, log = FALSE) { if (!is.null(insert_refcat_fam)) { x <- insert_refcat(x, family = insert_refcat_fam) } if (log) { out <- log_softmax(x) } else { out <- softmax(x) } out } # generic link function for the categorical family # # @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) of probabilities for the # response categories or an array (S x N x `ncat`) containing the same values # as the matrix just described, but for N observations. # @param refcat Numeric (length 1) giving the index of the reference category. # @param return_refcat Logical (length 1) indicating whether to include the # reference category in the return value. # # @return If `x` is a matrix, then a matrix (S x `ncat` or S x `ncat - 1` # (depending on `return_refcat`), with S denoting the number of posterior # draws and `ncat` denoting the number of response categories) containing the # values of the link function applied to `x`. If `x` is an array, then an # array (S x N x `ncat` or S x N x `ncat - 1` (depending on `return_refcat`)) # containing the same values as the matrix just described, but for N # observations. link_categorical <- function(x, refcat = 1, return_refcat = TRUE) { ndim <- length(dim(x)) marg_noncat <- seq_along(dim(x))[-ndim] if (return_refcat) { x_tosweep <- x } else { x_tosweep <- slice(x, ndim, -refcat, drop = FALSE) } log(sweep( x_tosweep, MARGIN = marg_noncat, STATS = slice(x, ndim, refcat), FUN = "/" )) } # CDF of the categorical distribution with the softmax transform # @param q positive integers not greater than ncat # @param eta the linear predictor (of length or ncol ncat) # @param log.p return values on the log scale? 
pcategorical <- function(q, eta, log.p = FALSE) { p <- dcategorical(seq_len(max(q)), eta = eta) out <- cblapply(q, function(j) rowSums(p[, 1:j, drop = FALSE])) if (log.p) { out <- log(out) } out } # density of the multinomial distribution with the softmax transform # @param x positive integers not greater than ncat # @param eta the linear predictor (of length or ncol ncat) # @param log return values on the log scale? dmultinomial <- function(x, eta, log = FALSE) { if (is.null(dim(eta))) { eta <- matrix(eta, nrow = 1) } if (length(dim(eta)) != 2L) { stop2("eta must be a numeric vector or matrix.") } log_prob <- log_softmax(eta) size <- sum(x) x <- data2draws(x, dim = dim(eta)) out <- lgamma(size + 1) + rowSums(x * log_prob - lgamma(x + 1)) if (!log) { out <- exp(out) } out } # density of the cumulative distribution # # @param x Integer vector containing response category indices to return the # "densities" (probability masses) for. # @param eta Vector (length S, with S denoting the number of posterior draws) of # linear predictor draws. # @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior # draws and `ncat` denoting the number of response categories) of threshold # draws. # @param disc Vector (length S, with S denoting the number of posterior draws, # or length 1 for recycling) of discrimination parameter draws. # @param link Character vector (length 1) giving the name of the link function. # # @return A matrix (S x `length(x)`) containing the values of the inverse-link # function applied to `disc * (thres - eta)`. dcumulative <- function(x, eta, thres, disc = 1, link = "logit") { eta <- disc * (thres - eta) if (link == "identity") { out <- eta } else { out <- inv_link_cumulative(eta, link = link) } out[, x, drop = FALSE] } # generic inverse link function for the cumulative family # # @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws # and `ncat` denoting the number of response categories) with values of # `disc * (thres - eta)` for one observation (see dcumulative()) or an array # (S x N x `ncat - 1`) containing the same values as the matrix just # described, but for N observations. # @param link Character vector (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the inverse-link function applied to # `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same # values as the matrix just described, but for N observations. inv_link_cumulative <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) dim_noncat <- dim(x)[-ndim] ones_arr <- array(1, dim = c(dim_noncat, 1)) zeros_arr <- array(0, dim = c(dim_noncat, 1)) abind::abind(x, ones_arr) - abind::abind(zeros_arr, x) } # generic link function for the cumulative family # # @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) of probabilities for the # response categories or an array (S x N x `ncat`) containing the same values # as the matrix just described, but for N observations. # @param link Character string (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the link function applied to `x`. 
If # `x` is an array, then an array (S x N x `ncat - 1`) containing the same # values as the matrix just described, but for N observations. link_cumulative <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] dim_noncat <- dim(x)[-ndim] nthres <- dim(x)[ndim] - 1 marg_noncat <- seq_along(dim(x))[-ndim] dim_t <- c(nthres, dim_noncat) x <- apply(slice(x, ndim, -ncat, drop = FALSE), marg_noncat, cumsum) x <- aperm(array(x, dim = dim_t), perm = c(marg_noncat + 1, 1)) link(x, link) } # density of the sratio distribution # # @param x Integer vector containing response category indices to return the # "densities" (probability masses) for. # @param eta Vector (length S, with S denoting the number of posterior draws) of # linear predictor draws. # @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior # draws and `ncat` denoting the number of response categories) of threshold # draws. # @param disc Vector (length S, with S denoting the number of posterior draws, # or length 1 for recycling) of discrimination parameter draws. # @param link Character vector (length 1) giving the name of the link function. # # @return A matrix (S x `length(x)`) containing the values of the inverse-link # function applied to `disc * (thres - eta)`. dsratio <- function(x, eta, thres, disc = 1, link = "logit") { eta <- disc * (thres - eta) if (link == "identity") { out <- eta } else { out <- inv_link_sratio(eta, link = link) } out[, x, drop = FALSE] } # generic inverse link function for the sratio family # # @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws # and `ncat` denoting the number of response categories) with values of # `disc * (thres - eta)` for one observation (see dsratio()) or an array # (S x N x `ncat - 1`) containing the same values as the matrix just # described, but for N observations. # @param link Character vector (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the inverse-link function applied to # `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same # values as the matrix just described, but for N observations. inv_link_sratio <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) dim_noncat <- dim(x)[-ndim] nthres <- dim(x)[ndim] marg_noncat <- seq_along(dim(x))[-ndim] ones_arr <- array(1, dim = c(dim_noncat, 1)) dim_t <- c(nthres, dim_noncat) Sx_cumprod <- aperm( array(apply(1 - x, marg_noncat, cumprod), dim = dim_t), perm = c(marg_noncat + 1, 1) ) abind::abind(x, ones_arr) * abind::abind(ones_arr, Sx_cumprod) } # generic link function for the sratio family # # @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) of probabilities for the # response categories or an array (S x N x `ncat`) containing the same values # as the matrix just described, but for N observations. # @param link Character string (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the link function applied to `x`. If # `x` is an array, then an array (S x N x `ncat - 1`) containing the same # values as the matrix just described, but for N observations. 
link_sratio <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, S_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- slice(x, ndim, k, drop = FALSE) / prev_res$S_km1_prod .out <- list( F_k = abind::abind(prev_res$F_k, F_k), S_km1_prod = prev_res$S_km1_prod * (1 - F_k) ) return(.out) } x <- .F_k(dim(x)[ndim] - 1)$F_k link(x, link) } # density of the cratio distribution # # @param x Integer vector containing response category indices to return the # "densities" (probability masses) for. # @param eta Vector (length S, with S denoting the number of posterior draws) of # linear predictor draws. # @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior # draws and `ncat` denoting the number of response categories) of threshold # draws. # @param disc Vector (length S, with S denoting the number of posterior draws, # or length 1 for recycling) of discrimination parameter draws. # @param link Character vector (length 1) giving the name of the link function. # # @return A matrix (S x `length(x)`) containing the values of the inverse-link # function applied to `disc * (thres - eta)`. dcratio <- function(x, eta, thres, disc = 1, link = "logit") { eta <- disc * (eta - thres) if (link == "identity") { out <- eta } else { out <- inv_link_cratio(eta, link = link) } out[, x, drop = FALSE] } # generic inverse link function for the cratio family # # @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws # and `ncat` denoting the number of response categories) with values of # `disc * (thres - eta)` for one observation (see dcratio()) or an array # (S x N x `ncat - 1`) containing the same values as the matrix just # described, but for N observations. # @param link Character vector (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the inverse-link function applied to # `x`. If `x` is an array, then an array (S x N x `ncat`) containing the same # values as the matrix just described, but for N observations. inv_link_cratio <- function(x, link) { x <- ilink(x, link) ndim <- length(dim(x)) dim_noncat <- dim(x)[-ndim] nthres <- dim(x)[ndim] marg_noncat <- seq_along(dim(x))[-ndim] ones_arr <- array(1, dim = c(dim_noncat, 1)) dim_t <- c(nthres, dim_noncat) x_cumprod <- aperm( array(apply(x, marg_noncat, cumprod), dim = dim_t), perm = c(marg_noncat + 1, 1) ) abind::abind(1 - x, ones_arr) * abind::abind(ones_arr, x_cumprod) } # generic link function for the cratio family # # @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) of probabilities for the # response categories or an array (S x N x `ncat`) containing the same values # as the matrix just described, but for N observations. # @param link Character string (length 1) giving the name of the link function. # # @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the link function applied to `x`. If # `x` is an array, then an array (S x N x `ncat - 1`) containing the same # values as the matrix just described, but for N observations. 
link_cratio <- function(x, link) { ndim <- length(dim(x)) .F_k <- function(k) { if (k == 1) { prev_res <- list(F_k = NULL, F_km1_prod = 1) } else { prev_res <- .F_k(k - 1) } F_k <- 1 - slice(x, ndim, k, drop = FALSE) / prev_res$F_km1_prod .out <- list( F_k = abind::abind(prev_res$F_k, F_k), F_km1_prod = prev_res$F_km1_prod * F_k ) return(.out) } x <- .F_k(dim(x)[ndim] - 1)$F_k link(x, link) } # density of the acat distribution # # @param x Integer vector containing response category indices to return the # "densities" (probability masses) for. # @param eta Vector (length S, with S denoting the number of posterior draws) of # linear predictor draws. # @param thres Matrix (S x `ncat - 1`, with S denoting the number of posterior # draws and `ncat` denoting the number of response categories) of threshold # draws. # @param disc Vector (length S, with S denoting the number of posterior draws, # or length 1 for recycling) of discrimination parameter draws. # @param link Character vector (length 1) giving the name of the link function. # # @return A matrix (S x `length(x)`) containing the values of the inverse-link # function applied to `disc * (thres - eta)`. dacat <- function(x, eta, thres, disc = 1, link = "logit") { eta <- disc * (eta - thres) if (link == "identity") { out <- eta } else { out <- inv_link_acat(eta, link = link) } out[, x, drop = FALSE] } # generic inverse link function for the acat family # # @param x Matrix (S x `ncat - 1`, with S denoting the number of posterior draws # and `ncat` denoting the number of response categories) with values of # `disc * (thres - eta)` (see dacat()). # @param link Character vector (length 1) giving the name of the link function. # # @return A matrix (S x `ncat`, with S denoting the number of posterior draws # and `ncat` denoting the number of response categories) containing the values # of the inverse-link function applied to `x`. inv_link_acat <- function(x, link) { ndim <- length(dim(x)) dim_noncat <- dim(x)[-ndim] nthres <- dim(x)[ndim] marg_noncat <- seq_along(dim(x))[-ndim] ones_arr <- array(1, dim = c(dim_noncat, 1)) dim_t <- c(nthres, dim_noncat) if (link == "logit") { # faster evaluation in this case exp_x_cumprod <- aperm( array(apply(exp(x), marg_noncat, cumprod), dim = dim_t), perm = c(marg_noncat + 1, 1) ) out <- abind::abind(ones_arr, exp_x_cumprod) } else { x <- ilink(x, link) x_cumprod <- aperm( array(apply(x, marg_noncat, cumprod), dim = dim_t), perm = c(marg_noncat + 1, 1) ) Sx_cumprod_rev <- apply( 1 - slice(x, ndim, rev(seq_len(nthres)), drop = FALSE), marg_noncat, cumprod ) Sx_cumprod_rev <- aperm( array(Sx_cumprod_rev, dim = dim_t), perm = c(marg_noncat + 1, 1) ) Sx_cumprod_rev <- slice( Sx_cumprod_rev, ndim, rev(seq_len(nthres)), drop = FALSE ) out <- abind::abind(ones_arr, x_cumprod) * abind::abind(Sx_cumprod_rev, ones_arr) } catsum <- array(apply(out, marg_noncat, sum), dim = dim_noncat) sweep(out, marg_noncat, catsum, "/") } # generic link function for the acat family # # @param x Matrix (S x `ncat`, with S denoting the number of posterior draws and # `ncat` denoting the number of response categories) of probabilities for the # response categories or an array (S x N x `ncat`) containing the same values # as the matrix just described, but for N observations. # @param link Character string (length 1) giving the name of the link function. 
# # @return If `x` is a matrix, then a matrix (S x `ncat - 1`, with S denoting the # number of posterior draws and `ncat` denoting the number of response # categories) containing the values of the link function applied to `x`. If # `x` is an array, then an array (S x N x `ncat - 1`) containing the same # values as the matrix just described, but for N observations. link_acat <- function(x, link) { ndim <- length(dim(x)) ncat <- dim(x)[ndim] x <- slice(x, ndim, -1, drop = FALSE) / slice(x, ndim, -ncat, drop = FALSE) if (link == "logit") { # faster evaluation in this case out <- log(x) } else { x <- inv_odds(x) out <- link(x, link) } out } # CDF for ordinal distributions # @param q positive integers not greater than ncat # @param eta draws of the linear predictor # @param thres draws of threshold parameters # @param disc draws of the discrimination parameter # @param family a character string naming the family # @param link a character string naming the link # @return a matrix of probabilities P(x <= q) pordinal <- function(q, eta, thres, disc = 1, family = NULL, link = "logit") { family <- as_one_character(family) link <- as_one_character(link) args <- nlist(x = seq_len(max(q)), eta, thres, disc, link) p <- do_call(paste0("d", family), args) .fun <- function(j) rowSums(as.matrix(p[, 1:j, drop = FALSE])) cblapply(q, .fun) } # helper functions to shift arbitrary distributions dshifted <- function(dist, x, shift = 0, ...) { do_call(paste0("d", dist), list(x - shift, ...)) } pshifted <- function(dist, q, shift = 0, ...) { do_call(paste0("p", dist), list(q - shift, ...)) } qshifted <- function(dist, p, shift = 0, ...) { do_call(paste0("q", dist), list(p, ...)) + shift } rshifted <- function(dist, n, shift = 0, ...) { do_call(paste0("r", dist), list(n, ...)) + shift } # check if 'n' in r functions is valid # @param n number of desired random draws # @param .. parameter vectors # @return validated 'n' check_n_rdist <- function(n, ...) { n <- as.integer(as_one_numeric(n)) max_len <- max(lengths(list(...))) if (max_len > 1L) { if (!n %in% c(1, max_len)) { stop2("'n' must match the maximum length of the parameter vectors.") } n <- max_len } n } brms/R/formula-cs.R0000644000175000017500000000176013701270367013755 0ustar nileshnilesh#' Category Specific Predictors in \pkg{brms} Models #' #' @aliases cse #' #' @param expr Expression containing predictors, #' for which category specific effects should be estimated. #' For evaluation, \R formula syntax is applied. #' #' @details For detailed documentation see \code{help(brmsformula)} #' as well as \code{vignette("brms_overview")}. #' #' This function is almost solely useful when #' called in formulas passed to the \pkg{brms} package. #' #' @seealso \code{\link{brmsformula}} #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ period + carry + cs(treat), #' data = inhaler, family = sratio("cloglog"), #' prior = set_prior("normal(0,5)"), chains = 2) #' summary(fit) #' plot(fit, ask = FALSE) #' } #' #' @export cs <- function(expr) { deparse_no_string(substitute(expr)) } # alias of function 'cs' used in the JSS paper of brms #' @export cse <- function(expr) { deparse_no_string(substitute(expr)) } brms/R/pp_mixture.R0000644000175000017500000000731014111751666014100 0ustar nileshnilesh#' Posterior Probabilities of Mixture Component Memberships #' #' Compute the posterior probabilities of mixture component #' memberships for each observation including uncertainty #' estimates. 
#' #' @inheritParams predict.brmsfit #' @param x An \R object usually of class \code{brmsfit}. #' @param log Logical; Indicates whether to return #' probabilities on the log-scale. #' #' @return #' If \code{summary = TRUE}, an N x E x K array, #' where N is the number of observations, K is the number #' of mixture components, and E is equal to \code{length(probs) + 2}. #' If \code{summary = FALSE}, an S x N x K array, where #' S is the number of posterior draws. #' #' @details #' The returned probabilities can be written as #' \eqn{P(Kn = k | Yn)}, that is the posterior probability #' that observation n originates from component k. #' They are computed using Bayes' Theorem #' \deqn{P(Kn = k | Yn) = P(Yn | Kn = k) P(Kn = k) / P(Yn),} #' where \eqn{P(Yn | Kn = k)} is the (posterior) likelihood #' of observation n for component k, \eqn{P(Kn = k)} is #' the (posterior) mixing probability of component k #' (i.e. parameter \code{theta}), and #' \deqn{P(Yn) = \sum (k=1,...,K) P(Yn | Kn = k) P(Kn = k)} #' is a normalizing constant. #' #' @examples #' \dontrun{ #' ## simulate some data #' set.seed(1234) #' dat <- data.frame( #' y = c(rnorm(100), rnorm(50, 2)), #' x = rnorm(150) #' ) #' ## fit a simple normal mixture model #' mix <- mixture(gaussian, nmix = 2) #' prior <- c( #' prior(normal(0, 5), Intercept, nlpar = mu1), #' prior(normal(0, 5), Intercept, nlpar = mu2), #' prior(dirichlet(2, 2), theta) #' ) #' fit1 <- brm(bf(y ~ x), dat, family = mix, #' prior = prior, chains = 2, inits = 0) #' summary(fit1) #' #' ## compute the membership probabilities #' ppm <- pp_mixture(fit1) #' str(ppm) #' #' ## extract point estimates for each observation #' head(ppm[, 1, ]) #' #' ## classify every observation according to #' ## the most likely component #' apply(ppm[, 1, ], 1, which.max) #' } #' #' @export pp_mixture.brmsfit <- function(x, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, log = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { stopifnot_resp(x, resp) log <- as_one_logical(log) contains_draws(x) x <- restructure(x) if (is_mv(x)) { resp <- validate_resp(resp, x$formula$responses, multiple = FALSE) family <- x$family[[resp]] } else { family <- x$family } if (!is.mixfamily(family)) { stop2("Method 'pp_mixture' can only be applied to mixture models.") } prep <- prepare_predictions( x, newdata = newdata, re_formula = re_formula, resp = resp, draw_ids = draw_ids, ndraws = ndraws, check_response = TRUE, ... ) stopifnot(is.brmsprep(prep)) prep$pp_mixture <- TRUE for (dp in names(prep$dpars)) { prep$dpars[[dp]] <- get_dpar(prep, dpar = dp) } N <- choose_N(prep) out <- lapply(seq_len(N), log_lik_mixture, prep = prep) out <- abind(out, along = 3) out <- aperm(out, c(1, 3, 2)) old_order <- prep$old_order sort <- isTRUE(ncol(out) != length(old_order)) out <- reorder_obs(out, old_order, sort = sort) if (!log) { out <- exp(out) } if (summary) { out <- posterior_summary(out, probs = probs, robust = robust) dimnames(out) <- list( seq_len(nrow(out)), colnames(out), paste0("P(K = ", seq_len(dim(out)[3]), " | Y)") ) } out } #' @rdname pp_mixture.brmsfit #' @export pp_mixture <- function(x, ...) 
{ UseMethod("pp_mixture") } brms/R/stan-helpers.R0000644000175000017500000002045314111751666014314 0ustar nileshnilesh# unless otherwise specified, functions return a named list # of Stan code snippets to be pasted together later on # define Stan functions or globally used transformed data # TODO: refactor to not require extraction of information from all model parts stan_global_defs <- function(bterms, prior, ranef, threads) { families <- family_names(bterms) links <- family_info(bterms, "link") unique_combs <- !duplicated(paste0(families, ":", links)) families <- families[unique_combs] links <- links[unique_combs] out <- list() # TODO: detect these links in all dpars not just in 'mu' if (any(links == "cauchit")) { str_add(out$fun) <- " #include 'fun_cauchit.stan'\n" } else if (any(links == "cloglog")) { str_add(out$fun) <- " #include 'fun_cloglog.stan'\n" } else if (any(links == "softplus")) { str_add(out$fun) <- " #include 'fun_softplus.stan'\n" } else if (any(links == "squareplus")) { str_add(out$fun) <- " #include 'fun_squareplus.stan'\n" } special <- get_special_prior(prior) if (!isNULL(lapply(special, "[[", "horseshoe"))) { str_add(out$fun) <- " #include 'fun_horseshoe.stan'\n" } if (!isNULL(lapply(special, "[[", "R2D2"))) { str_add(out$fun) <- " #include 'fun_r2d2.stan'\n" } if (nrow(ranef)) { r_funs <- NULL ids <- unique(ranef$id) for (id in ids) { r <- ranef[ranef$id == id, ] if (nrow(r) > 1L && r$cor[1]) { if (nzchar(r$by[1])) { if (nzchar(r$cov[1])) { c(r_funs) <- " #include 'fun_scale_r_cor_by_cov.stan'\n" } else { c(r_funs) <- " #include 'fun_scale_r_cor_by.stan'\n" } } else { if (nzchar(r$cov[1])) { c(r_funs) <- " #include 'fun_scale_r_cor_cov.stan'\n" } else { c(r_funs) <- " #include 'fun_scale_r_cor.stan'\n" } } } } str_add(out$fun) <- collapse(unique(r_funs)) } family_files <- family_info(bterms, "include") if (length(family_files)) { str_add(out$fun) <- cglue(" #include '{family_files}'\n") } is_ordinal <- ulapply(families, is_ordinal) if (any(is_ordinal)) { ord_fams <- families[is_ordinal] ord_links <- links[is_ordinal] for (i in seq_along(ord_fams)) { str_add(out$fun) <- stan_ordinal_lpmf(ord_fams[i], ord_links[i]) } } uni_mo <- ulapply(get_effect(bterms, "sp"), attr, "uni_mo") if (length(uni_mo)) { str_add(out$fun) <- " #include 'fun_monotonic.stan'\n" } if (length(get_effect(bterms, "gp"))) { # TODO: include functions selectively str_add(out$fun) <- " #include 'fun_gaussian_process.stan'\n" str_add(out$fun) <- " #include 'fun_gaussian_process_approx.stan'\n" str_add(out$fun) <- " #include 'fun_which_range.stan'\n" } acterms <- get_effect(bterms, "ac") acefs <- lapply(acterms, tidy_acef) if (any(ulapply(acefs, has_ac_subset, dim = "time", cov = TRUE))) { # TODO: include functions selectively str_add(out$fun) <- glue( " #include 'fun_normal_time.stan'\n", " #include 'fun_student_t_time.stan'\n", " #include 'fun_scale_time_err.stan'\n", " #include 'fun_cholesky_cor_ar1.stan'\n", " #include 'fun_cholesky_cor_ma1.stan'\n", " #include 'fun_cholesky_cor_arma1.stan'\n", " #include 'fun_cholesky_cor_cosy.stan'\n" ) } if (any(ulapply(acefs, has_ac_class, "sar"))) { if ("gaussian" %in% families) { str_add(out$fun) <- glue( " #include 'fun_normal_lagsar.stan'\n", " #include 'fun_normal_errorsar.stan'\n" ) } if ("student" %in% families) { str_add(out$fun) <- glue( " #include 'fun_student_t_lagsar.stan'\n", " #include 'fun_student_t_errorsar.stan'\n" ) } } if (any(ulapply(acefs, has_ac_class, "car"))) { str_add(out$fun) <- glue( " #include 'fun_sparse_car_lpdf.stan'\n", " #include 
'fun_sparse_icar_lpdf.stan'\n" ) } if (any(ulapply(acefs, has_ac_class, "fcor"))) { str_add(out$fun) <- glue( " #include 'fun_normal_fcor.stan'\n", " #include 'fun_student_t_fcor.stan'\n" ) } if (use_threading(threads)) { str_add(out$fun) <- " #include 'fun_sequence.stan'\n" } out } # link function in Stan language # @param link name of the link function stan_link <- function(link) { switch(link, identity = "", log = "log", logm1 = "logm1", inverse = "inv", sqrt = "sqrt", "1/mu^2" = "inv_square", logit = "logit", probit = "inv_Phi", probit_approx = "inv_Phi", cloglog = "cloglog", cauchit = "cauchit", tan_half = "tan_half", log1p = "log1p", softplus = "log_expm1", squareplus = "inv_squareplus" ) } # inverse link in Stan language # @param link name of the link function stan_ilink <- function(link) { switch(link, identity = "", log = "exp", logm1 = "expp1", inverse = "inv", sqrt = "square", "1/mu^2" = "inv_sqrt", logit = "inv_logit", probit = "Phi", probit_approx = "Phi_approx", cloglog = "inv_cloglog", cauchit = "inv_cauchit", tan_half = "inv_tan_half", log1p = "expm1", softplus = "log1p_exp", squareplus = "squareplus" ) } # define a vector in Stan language stan_vector <- function(...) { paste0("transpose([", paste0(c(...), collapse = ", "), "])") } # prepare Stan code for correlations in the generated quantities block # @param cor name of the correlation vector # @param ncol number of columns of the correlation matrix stan_cor_gen_comp <- function(cor, ncol) { Cor <- paste0(toupper(substring(cor, 1, 1)), substring(cor, 2)) glue( " // extract upper diagonal of correlation matrix\n", " for (k in 1:{ncol}) {{\n", " for (j in 1:(k - 1)) {{\n", " {cor}[choose(k - 1, 2) + j] = {Cor}[j, k];\n", " }}\n", " }}\n" ) } # indicates if a family-link combination has a built in # function in Stan (such as binomial_logit) # @param family a list with elements 'family' and 'link' # ideally a (brms)family object # @param bterms brmsterms object of the univariate model stan_has_built_in_fun <- function(family, bterms) { stopifnot(all(c("family", "link") %in% names(family))) stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms$adforms, c("cens", "trunc")) link <- family[["link"]] dpar <- family[["dpar"]] if (cens_or_trunc) { # only few families have special lcdf and lccdf functions out <- has_built_in_fun(family, link, cdf = TRUE) || has_built_in_fun(bterms, link, dpar = dpar, cdf = TRUE) } else { out <- has_built_in_fun(family, link) || has_built_in_fun(bterms, link, dpar = dpar) } out } # get all variable names accepted in Stan stan_all_vars <- function(x) { x <- gsub("\\.", "+", x) all_vars(x) } # transform names to be used as variable names in Stan make_stan_names <- function(x) { gsub("\\.|_", "", make.names(x, unique = TRUE)) } # functions to handle indexing when threading stan_slice <- function(threads) { str_if(use_threading(threads), "[start:end]") } stan_nn <- function(threads) { str_if(use_threading(threads), "[nn]", "[n]") } stan_nn_def <- function(threads) { str_if(use_threading(threads), " int nn = n + start - 1;\n") } stan_nn_regex <- function() { "\\[((n)|(nn))\\]" } # clean up arguments for partial_log_lik # @param ... strings containing arguments of the form ', type identifier' # @return named list of two elements: # typed: types + identifiers for use in the function header # plain: identifiers only for use in the function call stan_clean_pll_args <- function(...) { args <- paste0(...) 
# split up header to remove duplicates typed <- unlist(strsplit(args, ", +"))[-1] typed <- unique(typed) plain <- rm_wsp(get_matches(" [^ ]+$", typed)) typed <- collapse(", ", typed) plain <- collapse(", ", plain) nlist(typed, plain) } # prepare a string to be used as comment in Stan stan_comment <- function(comment, wsp = 2) { comment <- as.character(comment) wsp <- wsp(nsp = wsp) if (!length(comment)) { return(character(0)) } ifelse(nzchar(comment), paste0(wsp, "// ", comment), "") } brms/R/formula-re.R0000644000175000017500000007237514111751666013766 0ustar nileshnilesh# This file contains functions dealing with the extended # lme4-like formula syntax to specify group-level terms #' Set up basic grouping terms in \pkg{brms} #' #' Function used to set up a basic grouping term in \pkg{brms}. #' The function does not evaluate its arguments -- #' it exists purely to help set up a model with grouping terms. #' \code{gr} is called implicitly inside the package #' and there is usually no need to call it directly. #' #' @param ... One or more terms containing grouping factors. #' @param by An optional factor variable, specifying sub-populations of the #' groups. For each level of the \code{by} variable, a separate #' variance-covariance matrix will be fitted. Levels of the grouping factor #' must be nested in levels of the \code{by} variable. #' @param cor Logical. If \code{TRUE} (the default), group-level terms will be #' modelled as correlated. #' @param id Optional character string. All group-level terms across the model #' with the same \code{id} will be modeled as correlated (if \code{cor} is #' \code{TRUE}). See \code{\link{brmsformula}} for more details. #' @param cov An optional matrix which is proportional to the within-group #' covariance matrix of the group-level effects. All levels of the grouping #' factor should appear as rownames of the corresponding matrix. This argument #' can be used, among other things, to model pedigrees and phylogenetic effects. See #' \code{vignette("brms_phylogenetics")} for more details. By default, levels #' of the same grouping factor are modeled as independent of each other. #' @param dist Name of the distribution of the group-level effects. #' Currently \code{"gaussian"} is the only option.
#' #' @seealso \code{\link{brmsformula}} #' #' @examples #' \dontrun{ #' # model using basic lme4-style formula #' fit1 <- brm(count ~ Trt + (1|patient), data = epilepsy) #' summary(fit1) #' #' # equivalent model using 'gr' which is called anyway internally #' fit2 <- brm(count ~ Trt + (1|gr(patient)), data = epilepsy) #' summary(fit2) #' #' # include Trt as a by variable #' fit3 <- brm(count ~ Trt + (1|gr(patient, by = Trt)), data = epilepsy) #' summary(fit3) #' } #' #' @export gr <- function(..., by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian") { label <- deparse(match.call()) groups <- as.character(as.list(substitute(list(...)))[-1]) if (length(groups) > 1L) { stop2("Grouping structure 'gr' expects only a single grouping term") } stopif_illegal_group(groups[1]) cor <- as_one_logical(cor) id <- as_one_character(id, allow_na = TRUE) by <- substitute(by) if (!is.null(by)) { by <- deparse_combine(by) } else { by <- "" } cov <- substitute(cov) if (!is.null(cov)) { cov <- all.vars(cov) if (length(cov) != 1L) { stop2("Argument 'cov' must contain exactly one variable.") } } else { cov <- "" } dist <- match.arg(dist, c("gaussian", "student")) byvars <- all_vars(by) allvars <- str2formula(c(groups, byvars)) nlist(groups, allvars, label, by, cor, id, cov, dist, type = "") } #' Set up multi-membership grouping terms in \pkg{brms} #' #' Function to set up a multi-membership grouping term in \pkg{brms}. #' The function does not evaluate its arguments -- #' it exists purely to help set up a model with grouping terms. #' #' @inheritParams gr #' @param weights A matrix specifying the weights of each member. #' It should have as many columns as grouping terms specified in \code{...}. #' If \code{NULL} (the default), equal weights are used. #' @param by An optional factor matrix, specifying sub-populations of the #' groups. It should have as many columns as grouping terms specified in #' \code{...}. For each level of the \code{by} variable, a separate #' variance-covariance matrix will be fitted. Levels of the grouping factor #' must be nested in levels of the \code{by} variable matrix. #' @param scale Logical; if \code{TRUE} (the default), #' weights are standardized in order to sum to one per row. #' If negative weights are specified, \code{scale} needs #' to be set to \code{FALSE}.
#' #' @seealso \code{\link{brmsformula}}, \code{\link{mmc}} #' #' @examples #' \dontrun{ #' # simulate some data #' dat <- data.frame( #' y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), #' g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) #' ) #' #' # multi-membership model with two members per group and equal weights #' fit1 <- brm(y ~ x1 + (1|mm(g1, g2)), data = dat) #' summary(fit1) #' #' # weight the first member two times more than the second member #' dat$w1 <- rep(2, 100) #' dat$w2 <- rep(1, 100) #' fit2 <- brm(y ~ x1 + (1|mm(g1, g2, weights = cbind(w1, w2))), data = dat) #' summary(fit2) #' #' # multi-membership model with level specific covariate values #' dat$xc <- (dat$x1 + dat$x2) / 2 #' fit3 <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) #' summary(fit3) #' } #' #' @export mm <- function(..., weights = NULL, scale = TRUE, by = NULL, cor = TRUE, id = NA, cov = NULL, dist = "gaussian") { label <- deparse(match.call()) groups <- as.character(as.list(substitute(list(...)))[-1]) if (length(groups) < 2) { stop2("Multi-membership terms require at least two grouping variables.") } for (i in seq_along(groups)) { stopif_illegal_group(groups[i]) } cor <- as_one_logical(cor) id <- as_one_character(id, allow_na = TRUE) by <- substitute(by) if (!is.null(by)) { by <- deparse_combine(by) } else { by <- "" } cov <- substitute(cov) if (!is.null(cov)) { cov <- all.vars(cov) if (length(cov) != 1L) { stop2("Argument 'cov' must contain exactly one variable.") } } else { cov <- "" } dist <- match.arg(dist, c("gaussian", "student")) scale <- as_one_logical(scale) weights <- substitute(weights) weightvars <- all_vars(weights) byvars <- all_vars(by) allvars <- str2formula(c(groups, weightvars, byvars)) if (!is.null(weights)) { weights <- str2formula(deparse_no_string(weights)) attr(weights, "scale") <- scale weightvars <- str2formula(weightvars) } nlist( groups, weights, weightvars, allvars, label, by, cor, id, cov, dist, type = "mm" ) } #' Multi-Membership Covariates #' #' Specify covariates that vary over different levels #' of multi-membership grouping factors thus requiring #' special treatment. This function is almost solely useful #' when called in combination with \code{\link{mm}}. #' Outside of multi-membership terms it will behave #' very much like \code{\link{cbind}}. #' #' @param ... One or more terms containing covariates #' corresponding to the grouping levels specified in \code{\link{mm}}. #' #' @return A matrix with covariates as columns. #' #' @seealso \code{\link{mm}} #' #' @examples #' \dontrun{ #' # simulate some data #' dat <- data.frame( #' y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), #' g1 = sample(1:10, 100, TRUE), g2 = sample(1:10, 100, TRUE) #' ) #' #' # multi-membership model with level specific covariate values #' dat$xc <- (dat$x1 + dat$x2) / 2 #' fit <- brm(y ~ xc + (1 + mmc(x1, x2) | mm(g1, g2)), data = dat) #' summary(fit) #' } #' #' @export mmc <- function(...) { dots <- list(...) if (any(ulapply(dots, is_like_factor))) { stop2("'mmc' requires numeric variables.") } out <- cbind(...)
colnames(out) <- paste0("?", colnames(out)) out } # check if the group part of a group-level term is invalid # @param group the group part of a group-level term illegal_group_expr <- function(group) { group <- as_one_character(group) valid_expr <- ":|([^([:digit:]|[:punct:])]|\\.)[[:alnum:]_\\.]*" rsv_signs <- c("+", "-", "*", "/", "|", "::") nzchar(gsub(valid_expr, "", group)) || any(ulapply(rsv_signs, grepl, x = group, fixed = TRUE)) } stopif_illegal_group <- function(group) { if (illegal_group_expr(group)) { stop2( "Illegal grouping term '", group, "'. It may contain ", "only variable names combined by the symbol ':'" ) } invisible(NULL) } re_lhs <- function(re_terms) { get_matches("^[^\\|]*", re_terms) } re_mid <- function(re_terms) { get_matches("\\|([^\\|]*\\||)", re_terms) } re_rhs <- function(re_terms) { sub("^\\|", "", get_matches("\\|[^\\|]*$", re_terms)) } # extract the three parts of group-level terms # @param re_terms character vector of RE terms in lme4 syntax # @return a data.frame with one row per group-level term re_parts <- function(re_terms) { lhs <- re_lhs(re_terms) mid <- re_mid(re_terms) rhs <- re_rhs(re_terms) out <- nlist(lhs, mid, rhs) if (any(lengths(out) != length(re_terms))) { stop2("Invalid syntax used in group-level terms.") } as.data.frame(out, stringsAsFactors = FALSE) } # split nested group-level terms and check for special effects terms # @param re_terms character vector of RE terms in extended lme4 syntax split_re_terms <- function(re_terms) { if (!length(re_terms)) { return(re_terms) } stopifnot(is.character(re_terms)) # split after grouping factor terms re_parts <- re_parts(re_terms) new_re_terms <- vector("list", length(re_terms)) for (i in seq_along(re_terms)) { new_re_rhs <- terms(formula(paste0("~", re_parts$rhs[i]))) new_re_rhs <- attr(new_re_rhs, "term.labels") new_re_rhs <- ifelse( !grepl("^(gr|mm)\\(", new_re_rhs), paste0("gr(", new_re_rhs, ")"), new_re_rhs ) new_re_terms[[i]] <- paste0( re_parts$lhs[i], re_parts$mid[i], new_re_rhs ) } re_terms <- unlist(new_re_terms) # split after coefficient types re_parts <- re_parts(re_terms) new_re_terms <- type <- vector("list", length(re_terms)) for (i in seq_along(re_terms)) { lhs_form <- formula(paste("~", re_parts$lhs[i])) lhs_all_terms <- all_terms(lhs_form) # otherwise varying intercepts cannot be handled reliably is_cs_term <- grepl_expr(regex_sp("cs"), lhs_all_terms) if (any(is_cs_term) && !all(is_cs_term)) { stop2("Please specify category specific effects ", "in separate group-level terms.") } new_lhs <- NULL # prepare effects of special terms valid_types <- c("sp", "cs", "mmc") invalid_types <- c("sm", "gp") for (t in c(valid_types, invalid_types)) { lhs_tform <- do_call(paste0("terms_", t), list(lhs_form)) if (is.formula(lhs_tform)) { if (t %in% invalid_types) { stop2("Cannot handle splines or GPs in group-level terms.") } new_lhs <- c(new_lhs, formula2str(lhs_tform, rm = 1)) type[[i]] <- c(type[[i]], t) } } # prepare effects of basic terms lhs_terms <- terms(lhs_form) fe_form <- terms_fe(lhs_terms) fe_terms <- all_terms(fe_form) # the intercept lives within not outside of 'cs' terms has_intercept <- has_intercept(lhs_terms) && !"cs" %in% type[[i]] if (length(fe_terms) || has_intercept) { new_lhs <- c(new_lhs, formula2str(fe_form, rm = 1)) type[[i]] <- c(type[[i]], "") } # extract information from the mid section of the terms rhs_call <- str2lang(re_parts$rhs[i]) if (re_parts$mid[i] == "||") { # ||-syntax overwrites the 'cor' argument rhs_call$cor <- FALSE } gcall <- eval(rhs_call) if 
(gcall$cor) { id <- gsub("\\|", "", re_parts$mid[i]) if (nzchar(id)) { # ID-syntax overwrites the 'id' argument rhs_call$id <- id } else { id <- gcall$id } if (length(new_lhs) > 1 && isNA(id)) { # ID is required to model coefficients as correlated # if multiple types are provided within the same term rhs_call$id <- collapse(sample(0:9, 10, TRUE)) } } re_parts$mid[i] <- "|" re_parts$rhs[i] <- deparse_combine(rhs_call) new_re_terms[[i]] <- paste0(new_lhs, re_parts$mid[i], re_parts$rhs[i]) new_re_terms[[i]] <- new_re_terms[[i]][order(type[[i]])] type[[i]] <- sort(type[[i]]) } re_terms <- unlist(new_re_terms) structure(re_terms, type = unlist(type)) } # extract group-level terms from a formula of character vector # @param x formula or character vector # @param formula return a formula rather than a character string? # @param brackets include group-level terms in brackets? get_re_terms <- function(x, formula = FALSE, brackets = TRUE) { if (is.formula(x)) { x <- all_terms(x) } re_pos <- grepl("\\|", x) out <- x[re_pos] if (brackets && length(out)) { out <- paste0("(", out, ")") } if (formula) { out <- str2formula(out) } out } # validate the re_formula argument # @inheritParams extract_draws.brmsfit # @param formula: formula to match re_formula with # @return updated re_formula containing only terms existent in formula check_re_formula <- function(re_formula, formula) { old_re_formula <- get_re_terms(formula, formula = TRUE) if (is.null(re_formula)) { re_formula <- old_re_formula } else if (SW(anyNA(re_formula))) { re_formula <- ~1 } else { re_formula <- get_re_terms(as.formula(re_formula), formula = TRUE) new <- brmsterms(re_formula, check_response = FALSE)$dpars$mu[["re"]] old <- brmsterms(old_re_formula, check_response = FALSE)$dpars$mu[["re"]] if (NROW(new) && NROW(old)) { # compare old and new ranefs new_terms <- lapply(new$form, terms) found <- rep(FALSE, NROW(new)) for (i in seq_rows(new)) { group <- new$group[[i]] old_terms <- lapply(old$form[old$group == group], terms) j <- 1 while (!found[i] && j <= length(old_terms)) { new_term_labels <- attr(new_terms[[i]], "term.labels") old_term_labels <- attr(old_terms[[j]], "term.labels") new_intercept <- attr(new_terms[[i]], "intercept") old_intercept <- attr(old_terms[[j]], "intercept") found[i] <- isTRUE( all(new_term_labels %in% old_term_labels) && new_intercept <= old_intercept ) if (found[i]) { # terms have to maintain the original order so that Z_* data # and r_* parameters match in 'extract_draws' (fixes issue #844) term_matches <- match(new_term_labels, old_term_labels) if (is.unsorted(term_matches)) { stop2("Order of terms in 're_formula' should match the original order.") } } j <- j + 1 } } new <- new[found, ] if (NROW(new)) { forms <- ulapply(new$form, formula2str, rm = 1) groups <- ulapply(new$gcall, "[[", "label") re_terms <- paste("(", forms, "|", groups, ")") re_formula <- formula(paste("~", paste(re_terms, collapse = "+"))) } else { re_formula <- ~1 } } else { re_formula <- ~1 } } re_formula } # remove existing group-level terms in formula and # add valid group-level terms of re_formula update_re_terms <- function(formula, re_formula) { UseMethod("update_re_terms") } #' @export update_re_terms.mvbrmsformula <- function(formula, re_formula) { formula$forms <- lapply(formula$forms, update_re_terms, re_formula) formula } #' @export update_re_terms.brmsformula <- function(formula, re_formula) { formula$formula <- update_re_terms(formula$formula, re_formula) formula$pforms <- lapply(formula$pforms, update_re_terms, re_formula) formula 
} #' @export update_re_terms.formula <- function(formula, re_formula = NULL) { if (is.null(re_formula) || get_nl(formula)) { return(formula) } re_formula <- check_re_formula(re_formula, formula) new_formula <- formula2str(formula) old_re_terms <- get_re_terms(formula, brackets = FALSE) if (length(old_re_terms)) { # remove old group-level terms rm_terms <- c( paste0("+ (", old_re_terms, ")"), paste0("(", old_re_terms, ")"), old_re_terms ) new_formula <- rename(new_formula, rm_terms, "") if (grepl("~( *\\+*)*$", new_formula)) { # lhs only formulas are syntactically invalid # also check for trailing '+' signs (#769) new_formula <- paste(new_formula, "1") } } # add new group-level terms new_re_terms <- get_re_terms(re_formula) new_formula <- paste(c(new_formula, new_re_terms), collapse = "+") new_formula <- formula(new_formula) attributes(new_formula) <- attributes(formula) new_formula } # extract group-level terms get_re <- function(x, ...) { UseMethod("get_re") } #' @export get_re.default <- function(x, ...) { NULL } # get group-level information in a data.frame # @param bterms object of class 'brmsterms' # @param all logical; include ranefs of additional parameters? #' @export get_re.brmsterms <- function(x, ...) { re <- named_list(c(names(x$dpars), names(x$nlpars))) for (dp in names(x$dpars)) { re[[dp]] <- get_re(x$dpars[[dp]]) } for (nlp in names(x$nlpars)) { re[[nlp]] <- get_re(x$nlpars[[nlp]]) } do_call(rbind, re) } #' @export get_re.mvbrmsterms <- function(x, ...) { do_call(rbind, lapply(x$terms, get_re, ...)) } #' @export get_re.btl <- function(x, ...) { px <- check_prefix(x) re <- x[["re"]] if (is.null(re)) { re <- empty_re() } re$resp <- rep(px$resp, nrow(re)) re$dpar <- rep(px$dpar, nrow(re)) re$nlpar <- rep(px$nlpar, nrow(re)) re } # gather information on group-level effects # @param bterms object of class brmsterms # @param data data.frame containing all model variables # @param old_levels optional original levels of the grouping factors # @return a tidy data.frame with the following columns: # id: ID of the group-level effect # group: name of the grouping factor # gn: number of the grouping term within the respective formula # coef: name of the group-level effect # cn: number of the effect within the ID # resp: name of the response variable # dpar: name of the distributional parameter # nlpar: name of the non-linear parameter # cor: are correlations modeled for this effect? 
# ggn: global number of the grouping factor # type: special effects type; can be 'sp' or 'cs' # gcall: output of functions 'gr' or 'mm' # form: formula used to compute the effects tidy_ranef <- function(bterms, data, old_levels = NULL) { data <- combine_groups(data, get_group_vars(bterms)) re <- get_re(bterms) ranef <- vector("list", nrow(re)) used_ids <- new_ids <- NULL id_groups <- list() j <- 1 for (i in seq_rows(re)) { if (!nzchar(re$type[i])) { coef <- colnames(get_model_matrix(re$form[[i]], data)) } else if (re$type[i] == "sp") { coef <- tidy_spef(re$form[[i]], data)$coef } else if (re$type[i] == "mmc") { coef <- rename(all_terms(re$form[[i]])) } else if (re$type[i] == "cs") { resp <- re$resp[i] if (nzchar(resp)) { stopifnot(is.mvbrmsterms(bterms)) nthres <- max(get_thres(bterms$terms[[resp]])) } else { stopifnot(is.brmsterms(bterms)) nthres <- max(get_thres(bterms)) } indices <- paste0("[", seq_len(nthres), "]") coef <- colnames(get_model_matrix(re$form[[i]], data = data)) coef <- as.vector(t(outer(coef, indices, paste0))) } avoid_dpars(coef, bterms = bterms) rdat <- data.frame( id = re$id[[i]], group = re$group[[i]], gn = re$gn[[i]], gtype = re$gtype[[i]], coef = coef, cn = NA, resp = re$resp[[i]], dpar = re$dpar[[i]], nlpar = re$nlpar[[i]], ggn = NA, cor = re$cor[[i]], type = re$type[[i]], by = re$gcall[[i]]$by, cov = re$gcall[[i]]$cov, dist = re$gcall[[i]]$dist, stringsAsFactors = FALSE ) bylevels <- NULL if (nzchar(rdat$by[1])) { bylevels <- eval2(rdat$by[1], data) bylevels <- rm_wsp(levels(factor(bylevels))) } rdat$bylevels <- repl(bylevels, nrow(rdat)) rdat$form <- repl(re$form[[i]], nrow(rdat)) rdat$gcall <- repl(re$gcall[[i]], nrow(rdat)) # prepare group-level IDs id <- re$id[[i]] if (is.na(id)) { rdat$id <- j j <- j + 1 } else { if (id %in% used_ids) { k <- match(id, used_ids) rdat$id <- new_ids[k] new_id_groups <- c(re$group[[i]], re$gcall[[i]]$groups) if (!identical(new_id_groups, id_groups[[k]])) { stop2("Can only combine group-level terms of the ", "same grouping factors.") } } else { used_ids <- c(used_ids, id) k <- length(used_ids) rdat$id <- new_ids[k] <- j id_groups[[k]] <- c(re$group[[i]], re$gcall[[i]]$groups) j <- j + 1 } } ranef[[i]] <- rdat } ranef <- do_call(rbind, c(list(empty_ranef()), ranef)) # check for overlap between different group types rsv_groups <- ranef[nzchar(ranef$gtype), "group"] other_groups <- ranef[!nzchar(ranef$gtype), "group"] inv_groups <- intersect(rsv_groups, other_groups) if (length(inv_groups)) { inv_groups <- paste0("'", inv_groups, "'", collapse = ", ") stop2("Grouping factor names ", inv_groups, " are reserved.") } # check for duplicated and thus not identified effects dup <- duplicated(ranef[, c("group", "coef", vars_prefix())]) if (any(dup)) { dr <- ranef[which(dup)[1], ] stop2( "Duplicated group-level effects are not allowed.\n", "Occurred for effect '", dr$coef, "' of group '", dr$group, "'."
) } if (nrow(ranef)) { for (id in unique(ranef$id)) { ranef$cn[ranef$id == id] <- seq_len(sum(ranef$id == id)) } ranef$ggn <- match(ranef$group, unique(ranef$group)) if (is.null(old_levels)) { rsub <- ranef[!duplicated(ranef$group), ] levels <- named_list(rsub$group) for (i in seq_along(levels)) { # combine levels of all grouping factors within one grouping term levels[[i]] <- unique(ulapply( rsub$gcall[[i]]$groups, function(g) levels(factor(get(g, data))) )) # store information of corresponding by levels if (nzchar(rsub$by[i])) { stopifnot(rsub$type[i] %in% c("", "mmc")) by <- rsub$by[i] bylevels <- rsub$bylevels[[i]] byvar <- rm_wsp(eval2(by, data)) groups <- rsub$gcall[[i]]$groups if (rsub$gtype[i] == "mm") { byvar <- as.matrix(byvar) if (!identical(dim(byvar), c(nrow(data), length(groups)))) { stop2( "Grouping structure 'mm' expects 'by' to be ", "a matrix with as many columns as grouping factors." ) } df <- J <- named_list(groups) for (k in seq_along(groups)) { J[[k]] <- match(get(groups[k], data), levels[[i]]) df[[k]] <- data.frame(J = J[[k]], by = byvar[, k]) } J <- unlist(J) df <- do_call(rbind, df) } else { J <- match(get(groups, data), levels[[i]]) df <- data.frame(J = J, by = byvar) } df <- unique(df) if (nrow(df) > length(unique(J))) { stop2("Some levels of ", collapse_comma(groups), " correspond to multiple levels of '", by, "'.") } df <- df[order(df$J), ] by_per_level <- bylevels[match(df$by, bylevels)] attr(levels[[i]], "by") <- by_per_level } } attr(ranef, "levels") <- levels } else { # for newdata numeration has to depend on the original levels attr(ranef, "levels") <- old_levels } # incorporate deprecated 'cov_ranef' argument ranef <- update_ranef_cov(ranef, bterms) } # ordering after IDs matches the order of the posterior draws # if multiple IDs are used for the same grouping factor (#835) ranef <- ranef[order(ranef$id), , drop = FALSE] structure(ranef, class = c("ranef_frame", "data.frame")) } empty_ranef <- function() { structure( data.frame( id = numeric(0), group = character(0), gn = numeric(0), coef = character(0), cn = numeric(0), resp = character(0), dpar = character(0), nlpar = character(0), ggn = numeric(0), cor = logical(0), type = character(0), form = character(0), stringsAsFactors = FALSE ), class = c("ranef_frame", "data.frame") ) } empty_re <- function() { data.frame( group = character(0), gtype = character(0), gn = numeric(0), id = numeric(0), type = character(0), cor = logical(0), form = character(0) ) } is.ranef_frame <- function(x) { inherits(x, "ranef_frame") } # extract names of all grouping variables get_group_vars <- function(x, ...) { UseMethod("get_group_vars") } #' @export get_group_vars.brmsfit <- function(x, ...) { get_group_vars(x$formula, ...) } #' @export get_group_vars.default <- function(x, ...) { get_group_vars(brmsterms(x), ...) } #' @export get_group_vars.brmsterms <- function(x, ...) { .get_group_vars(x, ...) } #' @export get_group_vars.mvbrmsterms <- function(x, ...) { .get_group_vars(x, ...) } .get_group_vars <- function(x, ...) { out <- c(get_re_groups(x), get_me_groups(x), get_ac_groups(x)) out <- out[nzchar(out)] if (length(out)) { c(out) <- unlist(strsplit(out, ":")) out <- sort(unique(out)) } out } # get names of grouping variables of re terms get_re_groups <- function(x, ...) 
{ ulapply(get_re(x)$gcall, "[[", "groups") } # extract information about groups with a certain distribution get_dist_groups <- function(ranef, dist) { out <- subset2(ranef, dist = dist) out[!duplicated(out$group), c("group", "ggn", "id")] } # extract list of levels with one element per grouping factor # @param ... objects with a level attribute get_levels <- function(...) { dots <- list(...) out <- vector("list", length(dots)) for (i in seq_along(out)) { levels <- attr(dots[[i]], "levels", exact = TRUE) if (is.list(levels)) { stopifnot(!is.null(names(levels))) out[[i]] <- as.list(levels) } else if (!is.null(levels)) { stopifnot(isTRUE(nzchar(names(dots)[i]))) out[[i]] <- setNames(list(levels), names(dots)[[i]]) } } out <- unlist(out, recursive = FALSE) out[!duplicated(names(out))] } # extract names of group-level effects # @param ranef output of tidy_ranef() # @param group optional name of a grouping factor for # which to extract effect names # @param bylevels optional names of 'by' levels for # which to extract effect names # @return a vector of character strings get_rnames <- function(ranef, group = NULL, bylevels = NULL) { stopifnot(is.data.frame(ranef)) if (!is.null(group)) { group <- as_one_character(group) ranef <- subset2(ranef, group = group) } stopifnot(length(unique(ranef$group)) == 1L) out <- paste0(usc(combine_prefix(ranef), "suffix"), ranef$coef) if (isTRUE(nzchar(ranef$by[1]))) { if (!is.null(bylevels)) { stopifnot(all(bylevels %in% ranef$bylevels[[1]])) } else { bylevels <- ranef$bylevels[[1]] } bylabels <- paste0(ranef$by[1], bylevels) out <- outer(out, bylabels, paste, sep = ":") } out } # validate within-group covariance matrices # @param M a matrix to be validated validate_recov_matrix <- function(M) { M <- as.matrix(M) if (!isSymmetric(unname(M))) { stop2("Within-group covariance matrices must be symmetric.") } found_levels <- rownames(M) if (is.null(found_levels)) { found_levels <- colnames(M) } if (is.null(found_levels)) { stop2("Row or column names are required for within-group covariance matrices.") } rownames(M) <- colnames(M) <- found_levels evs <- eigen(M, symmetric = TRUE, only.values = TRUE)$values if (min(evs) <= 0) { stop2("Within-group covariance matrices must be positive definite.") } M } # check validity of the 'cov_ranef' argument # argument 'cov_ranef' is deprecated as of version 2.12.5 validate_cov_ranef <- function(cov_ranef) { if (is.null(cov_ranef)) { return(cov_ranef) } warning2( "Argument 'cov_ranef' is deprecated and will be removed in the future. ", "Please use argument 'cov' in function 'gr' instead."
) cr_names <- names(cov_ranef) cr_is_named <- length(cr_names) && all(nzchar(cr_names)) if (!is.list(cov_ranef) || !cr_is_named) { stop2("'cov_ranef' must be a named list.") } if (any(duplicated(cr_names))) { stop2("Names of 'cov_ranef' must be unique.") } cov_ranef } # update 'ranef' according to information in 'cov_ranef' # argument 'cov_ranef' is deprecated as of version 2.12.5 update_ranef_cov <- function(ranef, bterms) { cr_names <- names(bterms$cov_ranef) if (!length(cr_names)) { return(ranef) } unused_names <- setdiff(cr_names, ranef$group) if (length(unused_names)) { stop2("The following elements of 'cov_ranef' are unused: ", collapse_comma(unused_names)) } has_cov <- ranef$group %in% cr_names ranef$cov[has_cov] <- ranef$group[has_cov] ranef } # extract 'cov_ranef' for storage in 'data2' # @param x a list-like object get_data2_cov_ranef <- function(x) { x[["cov_ranef"]] } brms/R/rename_pars.R0000644000175000017500000004745114116647132014207 0ustar nileshnilesh#' Rename Parameters #' #' Rename parameters within the \code{stanfit} object after model fitting to #' ensure reasonable parameter names. This function is usually called #' automatically by \code{\link{brm}} and users will rarely be required to call #' it themselves. #' #' @param x A brmsfit object. #' @return A brmsfit object with adjusted parameter names. #' #' @examples #' \dontrun{ #' # fit a model manually via rstan #' scode <- make_stancode(count ~ Trt, data = epilepsy) #' sdata <- make_standata(count ~ Trt, data = epilepsy) #' stanfit <- rstan::stan(model_code = scode, data = sdata) #' #' # feed the Stan model back into brms #' fit <- brm(count ~ Trt, data = epilepsy, empty = TRUE) #' fit$fit <- stanfit #' fit <- rename_pars(fit) #' summary(fit) #' } #' #' @export rename_pars <- function(x) { if (!length(x$fit@sim)) { return(x) } bterms <- brmsterms(x$formula) data <- model.frame(x) meef <- tidy_meef(bterms, data) pars <- variables(x) # find positions of parameters and define new names change <- c( change_effects(bterms, data = data, pars = pars), change_re(x$ranef, pars = pars), change_Xme(meef, pars = pars) ) # perform the actual renaming in x$fit@sim x <- save_old_par_order(x) x <- do_renaming(x, change) x$fit <- repair_stanfit_names(x$fit) x <- compute_quantities(x) x <- reorder_pars(x) x } # helps in renaming parameters after model fitting # @return a list whose elements can be interpreted by do_renaming change_effects <- function(x, ...) { UseMethod("change_effects") } #' @export change_effects.default <- function(x, ...) { NULL } #' @export change_effects.mvbrmsterms <- function(x, pars, ...) { out <- list() for (i in seq_along(x$terms)) { c(out) <- change_effects(x$terms[[i]], pars = pars, ...) } if (x$rescor) { rescor_names <- get_cornames( x$responses, type = "rescor", brackets = FALSE ) lc(out) <- clist(grepl("^rescor\\[", pars), rescor_names) } out } #' @export change_effects.brmsterms <- function(x, ...) { out <- list() for (dp in names(x$dpars)) { c(out) <- change_effects(x$dpars[[dp]], ...) } for (nlp in names(x$nlpars)) { c(out) <- change_effects(x$nlpars[[nlp]], ...) } if (is.formula(x$adforms$mi)) { c(out) <- change_Ymi(x, ...) } out } # helps in renaming parameters of additive predictor terms # @param pars vector of all parameter names #' @export change_effects.btl <- function(x, data, pars, ...)
{ c(change_fe(x, data, pars), change_sm(x, data, pars), change_cs(x, data, pars), change_sp(x, data, pars), change_gp(x, data, pars), change_thres(x, pars)) } # helps in renaming fixed effects parameters change_fe <- function(bterms, data, pars) { out <- list() px <- check_prefix(bterms) fixef <- colnames(data_fe(bterms, data)$X) if (stan_center_X(bterms)) { fixef <- setdiff(fixef, "Intercept") } if (length(fixef)) { b <- paste0("b", usc(combine_prefix(px))) pos <- grepl(paste0("^", b, "\\["), pars) bnames <- paste0(b, "_", fixef) lc(out) <- clist(pos, bnames) c(out) <- change_prior(b, pars, names = fixef) c(out) <- change_special_prior_local(bterms, fixef, pars) } out } # helps in renaming special effects parameters change_sp <- function(bterms, data, pars) { out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) return(out) p <- usc(combine_prefix(bterms)) bsp <- paste0("bsp", p) pos <- grepl(paste0("^", bsp, "\\["), pars) newnames <- paste0("bsp", p, "_", spef$coef) lc(out) <- clist(pos, newnames) c(out) <- change_prior(bsp, pars, names = spef$coef) simo_coef <- get_simo_labels(spef) for (i in seq_along(simo_coef)) { simo_old <- paste0("simo", p, "_", i) simo_new <- paste0("simo", p, "_", simo_coef[i]) pos <- grepl(paste0("^", simo_old, "\\["), pars) simo_names <- paste0(simo_new, "[", seq_len(sum(pos)), "]") lc(out) <- clist(pos, simo_names) c(out) <- change_prior( simo_old, pars, new_class = simo_new, is_vector = TRUE ) } out } # helps in renaming category specific effects parameters change_cs <- function(bterms, data, pars) { out <- list() csef <- colnames(data_cs(bterms, data)$Xcs) if (length(csef)) { p <- usc(combine_prefix(bterms)) bcsp <- paste0("bcs", p) ncs <- length(csef) thres <- sum(grepl(paste0("^b", p, "_Intercept\\["), pars)) csenames <- t(outer(csef, paste0("[", 1:thres, "]"), FUN = paste0)) csenames <- paste0(bcsp, "_", csenames) sort_cse <- ulapply(seq_len(ncs), seq, to = thres * ncs, by = ncs) lc(out) <- clist( grepl(paste0("^", bcsp, "\\["), pars), csenames, sort = sort_cse ) c(out) <- change_prior(bcsp, pars, names = csef) } out } # rename threshold parameters in ordinal models change_thres <- function(bterms, pars) { out <- list() # renaming is only required if multiple threshold were estimated if (!has_thres_groups(bterms)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) int <- paste0("b", p, "_Intercept") groups <- get_thres_groups(bterms) for (i in seq_along(groups)) { thres <- get_thres(bterms, groups[i]) pos <- grepl(glue("^{int}_{i}\\["), pars) int_names <- glue("{int}[{groups[i]},{thres}]") lc(out) <- clist(pos, int_names) } out } # helps in renaming global noise free variables # @param meef data.frame returned by 'tidy_meef' change_Xme <- function(meef, pars) { stopifnot(is.meef_frame(meef)) out <- list() levels <- attr(meef, "levels") groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) # rename mean and sd parameters for (par in c("meanme", "sdme")) { hpar <- paste0(par, "_", i) pos <- grepl(paste0("^", hpar, "\\["), pars) hpar_new <- paste0(par, "_", meef$coef[K]) lc(out) <- clist(pos, hpar_new) c(out) <- change_prior(hpar, pars, names = hpar_new) } # rename latent variable parameters for (k in K) { if (any(grepl("^Xme_", pars))) { Xme <- paste0("Xme_", k) pos <- grepl(paste0("^", Xme, "\\["), pars) Xme_new <- paste0("Xme_", meef$coef[k]) if (nzchar(g)) { indices <- gsub("[ \t\r\n]", ".", levels[[g]]) } else { indices <- seq_len(sum(pos)) } fnames <- paste0(Xme_new, 
"[", indices, "]") lc(out) <- clist(pos, fnames) } } # rename correlation parameters if (meef$cor[K[1]] && length(K) > 1L) { cor_type <- paste0("corme", usc(g)) cor_names <- get_cornames(meef$coef[K], cor_type, brackets = FALSE) cor_regex <- paste0("^corme_", i, "(\\[|$)") cor_pos <- grepl(cor_regex, pars) lc(out) <- clist(cor_pos, cor_names) c(out) <- change_prior( paste0("corme_", i), pars, new_class = paste0("corme", usc(g)) ) } } out } # helps in renaming estimated missing values change_Ymi <- function(bterms, data, pars, ...) { stopifnot(is.brmsterms(bterms)) out <- list() if (is.formula(bterms$adforms$mi)) { resp <- usc(combine_prefix(bterms)) resp_data <- data_response(bterms, data, check_response = FALSE) Ymi <- paste0("Ymi", resp) pos <- grepl(paste0("^", Ymi, "\\["), pars) if (any(pos)) { Jmi <- resp_data$Jmi fnames <- paste0(Ymi, "[", Jmi, "]") lc(out) <- clist(pos, fnames) } } out } # helps in renaming parameters of gaussian processes change_gp <- function(bterms, data, pars) { out <- list() p <- usc(combine_prefix(bterms), "prefix") gpef <- tidy_gpef(bterms, data) for (i in seq_rows(gpef)) { # rename GP hyperparameters sfx1 <- gpef$sfx1[[i]] sfx2 <- as.vector(gpef$sfx2[[i]]) sdgp <- paste0("sdgp", p) sdgp_old <- paste0(sdgp, "_", i) sdgp_pos <- grepl(paste0("^", sdgp_old, "\\["), pars) sdgp_names <- paste0(sdgp, "_", sfx1) lc(out) <- clist(sdgp_pos, sdgp_names) c(out) <- change_prior(sdgp_old, pars, names = sfx1, new_class = sdgp) lscale <- paste0("lscale", p) lscale_old <- paste0(lscale, "_", i) lscale_pos <- grepl(paste0("^", lscale_old, "\\["), pars) lscale_names <- paste0(lscale, "_", sfx2) lc(out) <- clist(lscale_pos, lscale_names) c(out) <- change_prior(lscale_old, pars, names = sfx2, new_class = lscale) zgp <- paste0("zgp", p) zgp_old <- paste0(zgp, "_", i) if (length(sfx1) > 1L) { # categorical 'by' variable for (j in seq_along(sfx1)) { zgp_old_sub <- paste0(zgp_old, "_", j) zgp_pos <- grepl(paste0("^", zgp_old_sub, "\\["), pars) if (any(zgp_pos)) { zgp_new <- paste0(zgp, "_", sfx1[j]) fnames <- paste0(zgp_new, "[", seq_len(sum(zgp_pos)), "]") lc(out) <- clist(zgp_pos, fnames) } } } else { zgp_pos <- grepl(paste0("^", zgp_old, "\\["), pars) if (any(zgp_pos)) { zgp_new <- paste0(zgp, "_", sfx1) fnames <- paste0(zgp_new, "[", seq_len(sum(zgp_pos)), "]") lc(out) <- clist(zgp_pos, fnames) } } } out } # helps in renaming smoothing term parameters change_sm <- function(bterms, data, pars) { out <- list() smef <- tidy_smef(bterms, data) if (NROW(smef)) { p <- usc(combine_prefix(bterms), "prefix") Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { bs <- paste0("bs", p) pos <- grepl(paste0("^", bs, "\\["), pars) bsnames <- paste0(bs, "_", Xs_names) lc(out) <- clist(pos, bsnames) c(out) <- change_prior(bs, pars, names = Xs_names) } sds <- paste0("sds", p) sds_names <- paste0(sds, "_", smef$label) s <- paste0("s", p) snames <- paste0(s, "_", smef$label) for (i in seq_rows(smef)) { for (j in seq_len(smef$nbases[i])) { ij <- paste0(i, "_", j) sds_pos <- grepl(paste0("^", sds, "_", ij), pars) lc(out) <- clist(sds_pos, paste0(sds_names[i], "_", j)) spos <- grepl(paste0("^", s, "_", ij), pars) sfnames <- paste0(snames[i], "_", j, "[", seq_len(sum(spos)), "]") lc(out) <- clist(spos, sfnames) new_prior_class <- paste0(sds, "_", smef$label[i], "_", j) c(out) <- change_prior( paste0(sds, "_", ij), pars, new_class = new_prior_class ) } } } out } # helps in renaming group-level parameters # @param ranef: data.frame returned by 'tidy_ranef' change_re <- function(ranef, pars) { out <- 
list() if (has_rows(ranef)) { for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) g <- r$group[1] rnames <- get_rnames(r) sd_names <- paste0("sd_", g, "__", as.vector(rnames)) sd_pos <- grepl(paste0("^sd_", id, "(\\[|$)"), pars) lc(out) <- clist(sd_pos, sd_names) c(out) <- change_prior( paste0("sd_", id), pars, new_class = paste0("sd_", g), names = paste0("_", as.vector(rnames)) ) # rename group-level correlations if (nrow(r) > 1L && isTRUE(r$cor[1])) { type <- paste0("cor_", g) if (isTRUE(nzchar(r$by[1]))) { cor_names <- named_list(r$bylevels[[1]]) for (j in seq_along(cor_names)) { cor_names[[j]] <- get_cornames( rnames[, j], type, brackets = FALSE ) } cor_names <- unlist(cor_names) } else { cor_names <- get_cornames(rnames, type, brackets = FALSE) } cor_regex <- paste0("^cor_", id, "(_[[:digit:]]+)?(\\[|$)") cor_pos <- grepl(cor_regex, pars) lc(out) <- clist(cor_pos, cor_names) c(out) <- change_prior( paste0("cor_", id), pars, new_class = paste0("cor_", g) ) } } if (any(grepl("^r_", pars))) { c(out) <- change_re_levels(ranef, pars = pars) } tranef <- get_dist_groups(ranef, "student") for (i in seq_rows(tranef)) { df_pos <- grepl(paste0("^df_", tranef$ggn[i], "$"), pars) df_name <- paste0("df_", tranef$group[i]) lc(out) <- clist(df_pos, df_name) } } out } # helps in renaming varying effects parameters per level # @param ranef: data.frame returned by 'tidy_ranef' change_re_levels <- function(ranef, pars) { out <- list() for (i in seq_rows(ranef)) { r <- ranef[i, ] p <- usc(combine_prefix(r)) r_parnames <- paste0("r_", r$id, p, "_", r$cn) r_regex <- paste0("^", r_parnames, "(\\[|$)") r_new_parname <- paste0("r_", r$group, usc(p)) # rstan doesn't like whitespaces in parameter names levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[r$group]]) index_names <- make_index_names(levels, r$coef, dim = 2) fnames <- paste0(r_new_parname, index_names) lc(out) <- clist(grepl(r_regex, pars), fnames) } out } # rename parameters related to special priors change_special_prior_local <- function(bterms, coef, pars) { out <- list() p <- combine_prefix(bterms) # rename parameters related to the R2D2 prior pos_R2D2_phi <- grepl(paste0("^R2D2_phi", p), pars) if (any(pos_R2D2_phi)) { phi <- paste0("R2D2_phi", p) new_phi <- paste0(phi, "_", coef) lc(out) <- clist(pos_R2D2_phi, new_phi) c(out) <- change_prior(phi, pars, names = coef, is_vector = TRUE) } out } # helps in renaming prior parameters # @param class the class of the parameters # @param pars names of all parameters in the model # @param names names to replace digits at the end of parameter names # @param new_class optional replacement of the orginal class name # @param is_vector indicate if the prior parameter is a vector change_prior <- function(class, pars, names = NULL, new_class = class, is_vector = FALSE) { out <- list() regex <- paste0("^prior_", class, "(_[[:digit:]]+|$|\\[)") pos_priors <- which(grepl(regex, pars)) if (length(pos_priors)) { priors <- gsub( paste0("^prior_", class), paste0("prior_", new_class), pars[pos_priors] ) if (is_vector) { if (!is.null(names)) { .names <- paste0("_", names) for (i in seq_along(priors)) { priors[i] <- gsub("\\[[[:digit:]]+\\]$", .names[i], priors[i]) } } lc(out) <- clist(pos_priors, priors) } else { digits <- sapply(priors, function(prior) { d <- regmatches(prior, gregexpr("_[[:digit:]]+$", prior))[[1]] if (length(d)) as.numeric(substr(d, 2, nchar(d))) else 0 }) for (i in seq_along(priors)) { if (digits[i] && !is.null(names)) { priors[i] <- gsub("[[:digit:]]+$", names[digits[i]], priors[i]) } if 
(pars[pos_priors[i]] != priors[i]) { lc(out) <- clist(pos_priors[i], priors[i]) } } } } out } # helper for change_* functions clist <- function(pos, fnames, ...) { structure(nlist(pos, fnames, ...), class = c("clist", "list")) } is.clist <- function(x) { inherits(x, "clist") } # compute index names in square brackets for indexing stan parameters # @param rownames a vector of row names # @param colnames a vector of columns # @param dim the number of output dimensions # @return all index pairs of rows and cols make_index_names <- function(rownames, colnames = NULL, dim = 1) { if (!dim %in% c(1, 2)) stop("dim must be 1 or 2") if (dim == 1) { index_names <- paste0("[", rownames, "]") } else { tmp <- outer(rownames, colnames, FUN = paste, sep = ",") index_names <- paste0("[", tmp, "]") } index_names } # save original order of the parameters in the stanfit object save_old_par_order <- function(x) { x$fit@sim$pars_oi_old <- x$fit@sim$pars_oi x$fit@sim$dims_oi_old <- x$fit@sim$dims_oi x$fit@sim$fnames_oi_old <- x$fit@sim$fnames_oi x } # perform actual renaming of Stan parameters # @param x a brmsfit object # @param change a list of lists each element allowing # to rename certain parameters # @return a brmsfit object with updated parameter names do_renaming <- function(x, change) { .do_renaming <- function(x, change) { stopifnot(is.clist(change)) x$fit@sim$fnames_oi[change$pos] <- change$fnames for (i in seq_len(chains)) { names(x$fit@sim$samples[[i]])[change$pos] <- change$fnames if (!is.null(change$sort)) { x$fit@sim$samples[[i]][change$pos] <- x$fit@sim$samples[[i]][change$pos][change$sort] } } return(x) } chains <- length(x$fit@sim$samples) # temporary fix for issue #387 until fixed in rstan for (i in seq_len(chains)) { x$fit@sim$samples[[i]]$lp__.1 <- NULL } for (i in seq_along(change)) { x <- .do_renaming(x, change[[i]]) } x } # order parameter draws after parameter class # @param x brmsfit object reorder_pars <- function(x) { all_classes <- unique(c( "b", "bs", "bsp", "bcs", "ar", "ma", "lagsar", "errorsar", "car", "sdcar", "cosy", "sd", "cor", "df", "sds", "sdgp", "lscale", valid_dpars(x), "Intercept", "tmp", "rescor", "delta", "lasso", "simo", "r", "s", "zgp", "rcar", "sbhaz", "R2D2", "Ymi", "Yl", "meanme", "sdme", "corme", "Xme", "prior", "lp" )) # reorder parameter classes class <- get_matches("^[^_]+", x$fit@sim$pars_oi) new_order <- order( factor(class, levels = all_classes), !grepl("_Intercept(_[[:digit:]]+)?$", x$fit@sim$pars_oi) ) x$fit@sim$dims_oi <- x$fit@sim$dims_oi[new_order] x$fit@sim$pars_oi <- names(x$fit@sim$dims_oi) # reorder single parameter names nsubpars <- ulapply(x$fit@sim$dims_oi, prod) has_subpars <- nsubpars > 0 new_order <- new_order[has_subpars] nsubpars <- nsubpars[has_subpars] num <- lapply(seq_along(new_order), function(x) as.numeric(paste0(x, ".", sprintf("%010d", seq_len(nsubpars[x])))) ) new_order <- order(unlist(num[order(new_order)])) x$fit@sim$fnames_oi <- x$fit@sim$fnames_oi[new_order] chains <- length(x$fit@sim$samples) for (i in seq_len(chains)) { # attributes of samples must be kept x$fit@sim$samples[[i]] <- subset_keep_attr(x$fit@sim$samples[[i]], new_order) } x } # wrapper function to compute and store quantities in the stanfit # object which were not computed / stored by Stan itself # @param x a brmsfit object # @return a brmsfit object compute_quantities <- function(x) { stopifnot(is.brmsfit(x)) x <- compute_xi(x) x } # helper function to compute parameter xi, which is currently # defined in the Stan model block and thus not being stored # @param 
x a brmsfit object # @return a brmsfit object compute_xi <- function(x, ...) { UseMethod("compute_xi") } #' @export compute_xi.brmsfit <- function(x, ...) { if (!any(grepl("^tmp_xi(_|$)", variables(x)))) { return(x) } draws <- try(extract_draws(x)) if (is(draws, "try-error")) { warning2("Trying to compute 'xi' was unsuccessful. ", "Some S3 methods may not work as expected.") return(x) } compute_xi(draws, fit = x, ...) } #' @export compute_xi.mvbrmsprep <- function(x, fit, ...) { stopifnot(is.brmsfit(fit)) for (resp in names(x$resps)) { fit <- compute_xi(x$resps[[resp]], fit = fit, ...) } fit } #' @export compute_xi.brmsprep <- function(x, fit, ...) { stopifnot(is.brmsfit(fit)) resp <- usc(x$resp) tmp_xi_name <- paste0("tmp_xi", resp) if (!tmp_xi_name %in% variables(fit)) { return(fit) } mu <- get_dpar(x, "mu") sigma <- get_dpar(x, "sigma") y <- matrix(x$data$Y, dim(mu)[1], dim(mu)[2], byrow = TRUE) bs <- -1 / matrixStats::rowRanges((y - mu) / sigma) bs <- matrixStats::rowRanges(bs) tmp_xi <- as.vector(as.matrix(fit, pars = tmp_xi_name)) xi <- inv_logit(tmp_xi) * (bs[, 2] - bs[, 1]) + bs[, 1] # write xi into stanfit object xi_name <- paste0("xi", resp) samp_chain <- length(xi) / fit$fit@sim$chains for (i in seq_len(fit$fit@sim$chains)) { xi_part <- xi[((i - 1) * samp_chain + 1):(i * samp_chain)] # add warmup draws not used anyway xi_part <- c(rep(0, fit$fit@sim$warmup2[1]), xi_part) fit$fit@sim$samples[[i]][[xi_name]] <- xi_part } fit$fit@sim$pars_oi <- c(fit$fit@sim$pars_oi, xi_name) fit$fit@sim$dims_oi[[xi_name]] <- numeric(0) fit$fit@sim$fnames_oi <- c(fit$fit@sim$fnames_oi, xi_name) fit$fit@sim$n_flatnames <- fit$fit@sim$n_flatnames + 1 fit } brms/R/bridgesampling.R0000644000175000017500000002142514111751665014675 0ustar nileshnilesh#' Log Marginal Likelihood via Bridge Sampling #' #' Computes log marginal likelihood via bridge sampling, #' which can be used in the computation of Bayes factors #' and posterior model probabilities. #' The \code{brmsfit} method is just a thin wrapper around #' the corresponding method for \code{stanfit} objects. #' #' @aliases bridge_sampler #' #' @param samples A \code{brmsfit} object. #' @param ... Additional arguments passed to #' \code{\link[bridgesampling:bridge_sampler]{bridge_sampler.stanfit}}. #' #' @details Computing the marginal likelihood requires samples of all variables #' defined in Stan's \code{parameters} block to be saved. Otherwise #' \code{bridge_sampler} cannot be computed. Thus, please set \code{save_pars #' = save_pars(all = TRUE)} in the call to \code{brm}, if you are planning to #' apply \code{bridge_sampler} to your models. #' #' The computation of marginal likelihoods based on bridge sampling requires #' a lot more posterior draws than usual. A good conservative #' rule of thumb is perhaps 10-fold more draws (read: the default of 4000 #' draws may not be enough in many cases). If not enough posterior #' draws are provided, the bridge sampling algorithm tends to be #' unstable, leading to considerably different results each time it is run. #' We thus recommend running \code{bridge_sampler} #' multiple times to check the stability of the results. #' #' More details are provided under #' \code{\link[bridgesampling:bridge_sampler]{bridgesampling::bridge_sampler}}.
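#'
#' As a rough illustration of such a stability check (a sketch only, assuming
#' a hypothetical model \code{fit1} that was fitted with
#' \code{save_pars = save_pars(all = TRUE)}), one may compare the log marginal
#' likelihood estimates of a few repeated runs:
#' \preformatted{
#' logml <- replicate(3, bridge_sampler(fit1)$logml)
#' range(logml)  # a large spread suggests too few posterior draws
#' }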
#' #' @seealso \code{ #' \link[brms:bayes_factor.brmsfit]{bayes_factor}, #' \link[brms:post_prob.brmsfit]{post_prob} #' } #' #' @examples #' \dontrun{ #' # model with the treatment effect #' fit1 <- brm( #' count ~ zAge + zBase + Trt, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_pars = save_pars(all = TRUE) #' ) #' summary(fit1) #' bridge_sampler(fit1) #' #' # model without the treatment effect #' fit2 <- brm( #' count ~ zAge + zBase, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_pars = save_pars(all = TRUE) #' ) #' summary(fit2) #' bridge_sampler(fit2) #' } #' #' @method bridge_sampler brmsfit #' @importFrom bridgesampling bridge_sampler #' @export bridge_sampler #' @export bridge_sampler.brmsfit <- function(samples, ...) { out <- get_criterion(samples, "marglik") if (inherits(out, "bridge") && !is.na(out$logml)) { # return precomputed criterion return(out) } samples <- restructure(samples) if (samples$version$brms <= "1.8.0") { stop2( "Models fitted with brms 1.8.0 or lower are not ", "usable in method 'bridge_sampler'." ) } if (!is_normalized(samples$model)) { stop2( "The Stan model has to be normalized to be ", "usable in method 'bridge_sampler'." ) } # otherwise bridge_sampler might not work in a new R session samples <- update_misc_env(samples) out <- try(bridge_sampler(samples$fit, ...)) if (is(out, "try-error")) { stop2( "Bridgesampling failed. Perhaps you did not set ", "'save_pars = save_pars(all = TRUE)' when fitting your model?" ) } out } #' Bayes Factors from Marginal Likelihoods #' #' Compute Bayes factors from marginal likelihoods. #' #' @aliases bayes_factor #' #' @param x1 A \code{brmsfit} object #' @param x2 Another \code{brmsfit} object based on the same responses. #' @param log Report Bayes factors on the log-scale? #' @param ... Additional arguments passed to #' \code{\link[brms:bridge_sampler.brmsfit]{bridge_sampler}}. #' #' @details Computing the marginal likelihood requires samples #' of all variables defined in Stan's \code{parameters} block #' to be saved. Otherwise \code{bayes_factor} cannot be computed. #' Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, #' if you are planning to apply \code{bayes_factor} to your models. #' #' The computation of Bayes factors based on bridge sampling requires #' a lot more posterior samples than usual. A good conservative #' rule of thumb is perhaps 10-fold more samples (read: the default of 4000 #' samples may not be enough in many cases). If not enough posterior #' samples are provided, the bridge sampling algorithm tends to be unstable, #' leading to considerably different results each time it is run. #' We thus recommend running \code{bayes_factor} #' multiple times to check the stability of the results. #' #' More details are provided under #' \code{\link[bridgesampling:bf]{bridgesampling::bayes_factor}}. 
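#'
#' The same kind of stability check applies here (a sketch, assuming
#' hypothetical models \code{fit1} and \code{fit2} fitted with all parameters
#' saved); comparing repeated runs on the log scale makes large discrepancies
#' easy to spot:
#' \preformatted{
#' bf_run1 <- bayes_factor(fit1, fit2, log = TRUE)
#' bf_run2 <- bayes_factor(fit1, fit2, log = TRUE)
#' bf_run1; bf_run2  # log Bayes factors should be close to each other
#' }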
#' #' @seealso \code{ #' \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, #' \link[brms:post_prob.brmsfit]{post_prob} #' } #' #' @examples #' \dontrun{ #' # model with the treatment effect #' fit1 <- brm( #' count ~ zAge + zBase + Trt, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_all_pars = TRUE #' ) #' summary(fit1) #' #' # model without the treatment effect #' fit2 <- brm( #' count ~ zAge + zBase, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_all_pars = TRUE #' ) #' summary(fit2) #' #' # compute the Bayes factor #' bayes_factor(fit1, fit2) #' } #' #' @method bayes_factor brmsfit #' @importFrom bridgesampling bayes_factor #' @export bayes_factor #' @export bayes_factor.brmsfit <- function(x1, x2, log = FALSE, ...) { model_name_1 <- deparse_combine(substitute(x1)) model_name_2 <- deparse_combine(substitute(x2)) match_response(list(x1, x2)) bridge1 <- bridge_sampler(x1, ...) bridge2 <- bridge_sampler(x2, ...) out <- bayes_factor(bridge1, bridge2, log = log) attr(out, "model_names") <- c(model_name_1, model_name_2) out } #' Posterior Model Probabilities from Marginal Likelihoods #' #' Compute posterior model probabilities from marginal likelihoods. #' The \code{brmsfit} method is just a thin wrapper around #' the corresponding method for \code{bridge} objects. #' #' @aliases post_prob #' #' @inheritParams loo.brmsfit #' @param prior_prob Numeric vector with prior model probabilities. #' If omitted, a uniform prior is used (i.e., all models are equally #' likely a priori). The default \code{NULL} corresponds to equal #' prior model weights. #' #' @details Computing the marginal likelihood requires samples #' of all variables defined in Stan's \code{parameters} block #' to be saved. Otherwise \code{post_prob} cannot be computed. #' Thus, please set \code{save_all_pars = TRUE} in the call to \code{brm}, #' if you are planning to apply \code{post_prob} to your models. #' #' The computation of model probabilities based on bridge sampling requires #' a lot more posterior samples than usual. A good conservative #' rule of thumb is perhaps 10-fold more samples (read: the default of 4000 #' samples may not be enough in many cases). If not enough posterior #' samples are provided, the bridge sampling algorithm tends to be #' unstable, leading to considerably different results each time it is run. #' We thus recommend running \code{post_prob} #' multiple times to check the stability of the results. #' #' More details are provided under #' \code{\link[bridgesampling:post_prob]{bridgesampling::post_prob}}.
#' #' @seealso \code{ #' \link[brms:bridge_sampler.brmsfit]{bridge_sampler}, #' \link[brms:bayes_factor.brmsfit]{bayes_factor} #' } #' #' @examples #' \dontrun{ #' # model with the treatment effect #' fit1 <- brm( #' count ~ zAge + zBase + Trt, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_all_pars = TRUE #' ) #' summary(fit1) #' #' # model without the treatment effect #' fit2 <- brm( #' count ~ zAge + zBase, #' data = epilepsy, family = negbinomial(), #' prior = prior(normal(0, 1), class = b), #' save_all_pars = TRUE #' ) #' summary(fit2) #' #' # compute the posterior model probabilities #' post_prob(fit1, fit2) #' #' # specify prior model probabilities #' post_prob(fit1, fit2, prior_prob = c(0.8, 0.2)) #' } #' #' @method post_prob brmsfit #' @importFrom bridgesampling post_prob #' @export post_prob #' @export post_prob.brmsfit <- function(x, ..., prior_prob = NULL, model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) models <- args$models args$models <- NULL bs <- vector("list", length(models)) for (i in seq_along(models)) { bs[[i]] <- do_call(bridge_sampler, c(list(models[[i]]), args)) } model_names <- names(models) do_call(post_prob, c(bs, nlist(prior_prob, model_names))) } brms/R/datasets.R0000644000175000017500000001607514105230573013514 0ustar nileshnilesh#' Infections in kidney patients #' #' @description This dataset, originally discussed in #' McGilchrist and Aisbett (1991), describes the first and second #' (possibly right censored) recurrence time of #' infection in kidney patients using portable dialysis equipment. #' In addition, information on the risk variables age, sex and disease #' type is provided. #' #' @format A data frame of 76 observations containing #' information on the following 7 variables. #' \describe{ #' \item{time}{The time to first or second recurrence of the infection, #' or the time of censoring} #' \item{recur}{A factor of levels \code{1} or \code{2} #' indicating if the infection recurred for the first #' or second time for this patient} #' \item{censored}{Either \code{0} or \code{1}, where \code{0} indicates #' no censoring of recurrence time and \code{1} indicates right censoring} #' \item{patient}{The patient number} #' \item{age}{The age of the patient} #' \item{sex}{The sex of the patient} #' \item{disease}{A factor of levels \code{other, GN, AN}, #' and \code{PKD} specifying the type of disease} #' } #' #' @source McGilchrist, C. A., & Aisbett, C. W. (1991). #' Regression with frailty in survival analysis. #' \emph{Biometrics}, 47(2), 461-466. #' #' @examples #' \dontrun{ #' ## performing survival analysis using the "weibull" family #' fit1 <- brm(time | cens(censored) ~ age + sex + disease, #' data = kidney, family = weibull, inits = "0") #' summary(fit1) #' plot(fit1) #' #' ## adding random intercepts over patients #' fit2 <- brm(time | cens(censored) ~ age + sex + disease + (1|patient), #' data = kidney, family = weibull(), inits = "0", #' prior = set_prior("cauchy(0,2)", class = "sd")) #' summary(fit2) #' plot(fit2) #' } #' "kidney" #' Clarity of inhaler instructions #' #' @description Ezzet and Whitehead (1991) analyze data from a two-treatment, #' two-period crossover trial to compare 2 inhalation devices for #' delivering the drug salbutamol in 286 asthma patients. #' Patients were asked to rate the clarity of leaflet instructions #' accompanying each device, using a 4-point ordinal scale.
#' #' @format A data frame of 572 observations containing #' information on the following 5 variables. #' \describe{ #' \item{subject}{The subject number} #' \item{rating}{The rating of the inhaler instructions #' on a scale ranging from 1 to 4} #' \item{treat}{A contrast to indicate which of #' the two inhaler devices was used} #' \item{period}{A contrast to indicate the time of administration} #' \item{carry}{A contrast to indicate possible carry over effects} #' } #' #' @source Ezzet, F., & Whitehead, J. (1991). #' A random effects model for ordinal responses from a crossover trial. #' \emph{Statistics in Medicine}, 10(6), 901-907. #' #' @examples #' \dontrun{ #' ## ordinal regression with family "sratio" #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler, family = sratio(), #' prior = set_prior("normal(0,5)")) #' summary(fit1) #' plot(fit1) #' #' ## ordinal regression with family "cumulative" #' ## and random intercept over subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative(), #' prior = set_prior("normal(0,5)")) #' summary(fit2) #' plot(fit2) #' } #' "inhaler" #' Epileptic seizure counts #' #' @description Breslow and Clayton (1993) analyze data initially #' provided by Thall and Vail (1990) concerning #' seizure counts in a randomized trial of anti-convulsant #' therapy in epilepsy. Covariates are treatment, #' 8-week baseline seizure counts, and age of the patients in years. #' #' @format A data frame of 236 observations containing information #' on the following 9 variables. #' \describe{ #' \item{Age}{The age of the patients in years} #' \item{Base}{The seizure count at 8-weeks baseline} #' \item{Trt}{Either \code{0} or \code{1} indicating #' if the patient received anti-convulsant therapy} #' \item{patient}{The patient number} #' \item{visit}{The session number from \code{1} (first visit) #' to \code{4} (last visit)} #' \item{count}{The seizure count between two visits} #' \item{obs}{The observation number, that is #' a unique identifier for each observation} #' \item{zAge}{Standardized \code{Age}} #' \item{zBase}{Standardized \code{Base}} #' } #' #' @source Thall, P. F., & Vail, S. C. (1990). #' Some covariance models for longitudinal count data with overdispersion. #' \emph{Biometrics, 46(2)}, 657-671. \cr #' #' Breslow, N. E., & Clayton, D. G. (1993). #' Approximate inference in generalized linear mixed models. #' \emph{Journal of the American Statistical Association}, 88(421), 9-25. #' #' @examples #' \dontrun{ #' ## poisson regression without random effects. #' fit1 <- brm(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit1) #' plot(fit1) #' #' ## poisson regression with varying intercepts of patients #' ## as well as normal priors for overall effects parameters. #' fit2 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' prior = set_prior("normal(0,5)")) #' summary(fit2) #' plot(fit2) #' } #' "epilepsy" #' Cumulative Insurance Loss Payments #' #' @description This dataset, discussed in Gesmann & Morris (2020), contains #' cumulative insurance loss payments over the course of ten years. #' #' @format A data frame of 55 observations containing information #' on the following 4 variables. 
#' \describe{ #' \item{AY}{Origin year of the insurance (1991 to 2000)} #' \item{dev}{Deviation from the origin year in months} #' \item{cum}{Cumulative loss payments} #' \item{premium}{Achieved premiums for the given origin year} #' } #' #' @source Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving #' Models. \emph{CAS Research Papers}. #' #' @examples #' \dontrun{ #' # non-linear model to predict cumulative loss payments #' fit_loss <- brm( #' bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), #' ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, #' nl = TRUE), #' data = loss, family = gaussian(), #' prior = c( #' prior(normal(5000, 1000), nlpar = "ult"), #' prior(normal(1, 2), nlpar = "omega"), #' prior(normal(45, 10), nlpar = "theta") #' ), #' control = list(adapt_delta = 0.9) #' ) #' #' # basic summaries #' summary(fit_loss) #' conditional_effects(fit_loss) #' #' # plot predictions per origin year #' conditions <- data.frame(AY = unique(loss$AY)) #' rownames(conditions) <- unique(loss$AY) #' me_loss <- conditional_effects( #' fit_loss, conditions = conditions, #' re_formula = NULL, method = "predict" #' ) #' plot(me_loss, ncol = 5, points = TRUE) #' } #' "loss" brms/R/update.R0000644000175000017500000002601714136562240013166 0ustar nileshnilesh#' Update \pkg{brms} models #' #' This method allows to update an existing \code{brmsfit} object. #' #' @param object An object of class \code{brmsfit}. #' @param formula. Changes to the formula; for details see #' \code{\link{update.formula}} and \code{\link{brmsformula}}. #' @param newdata Optional \code{data.frame} to update the model with new data. #' Data-dependent default priors will not be updated automatically. #' @param recompile Logical, indicating whether the Stan model should #' be recompiled. If \code{NULL} (the default), \code{update} tries #' to figure out internally, if recompilation is necessary. #' Setting it to \code{FALSE} will cause all Stan code changing #' arguments to be ignored. #' @param ... Other arguments passed to \code{\link{brm}}. #' #' @examples #' \dontrun{ #' fit1 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = gaussian("log")) #' summary(fit1) #' #' ## remove effects of 'disease' #' fit2 <- update(fit1, formula. = ~ . - disease) #' summary(fit2) #' #' ## remove the group specific term of 'patient' and #' ## change the data (just take a subset in this example) #' fit3 <- update(fit1, formula. = ~ . - (1|patient), #' newdata = kidney[1:38, ]) #' summary(fit3) #' #' ## use another family and add population-level priors #' fit4 <- update(fit1, family = weibull(), inits = "0", #' prior = set_prior("normal(0,5)")) #' summary(fit4) #' } #' #' @export update.brmsfit <- function(object, formula., newdata = NULL, recompile = NULL, ...) { dots <- list(...) testmode <- isTRUE(dots[["testmode"]]) dots$testmode <- NULL silent <- dots[["silent"]] if (!is.null(silent)) { silent <- validate_silent(silent) } else { silent <- 1L } object <- restructure(object) if (isTRUE(object$version$brms < "2.0.0")) { warning2("Updating models fitted with older versions of brms may fail.") } object$file <- NULL if ("data" %in% names(dots)) { # otherwise the data name cannot be found by substitute stop2("Please use argument 'newdata' to update the data.") } if (!is.null(newdata)) { dots$data <- newdata data_name <- substitute_name(newdata) } else { dots$data <- object$data data_name <- get_data_name(object$data) } if (missing(formula.) 
|| is.null(formula.)) { dots$formula <- object$formula if (!is.null(dots[["family"]])) { dots$formula <- bf(dots$formula, family = dots$family) } if (!is.null(dots[["autocor"]])) { dots$formula <- bf(dots$formula, autocor = dots$autocor) } } else { # TODO: restructure updating of the model formula if (is.mvbrmsformula(formula.) || is.mvbrmsformula(object$formula)) { stop2("Updating formulas of multivariate models is not yet possible.") } if (is.brmsformula(formula.)) { nl <- get_nl(formula.) } else { formula. <- as.formula(formula.) nl <- get_nl(formula(object)) } family <- get_arg("family", formula., dots, object) autocor <- get_arg("autocor", formula., dots, object) dots$formula <- bf(formula., family = family, autocor = autocor, nl = nl) if (is_nonlinear(object)) { if (length(setdiff(all.vars(dots$formula$formula), ".")) == 0L) { dots$formula <- update(object$formula, dots$formula, mode = "keep") } else { dots$formula <- update(object$formula, dots$formula, mode = "replace") if (silent < 2) { message("Argument 'formula.' will completely replace the ", "original formula in non-linear models.") } } } else { mvars <- all.vars(dots$formula$formula) mvars <- setdiff(mvars, c(names(object$data), ".")) if (length(mvars) && is.null(newdata)) { stop2("New variables found: ", collapse_comma(mvars), "\nPlease supply your data again via argument 'newdata'.") } dots$formula <- update(formula(object), dots$formula) } } # update response categories and ordinal thresholds dots$formula <- validate_formula(dots$formula, data = dots$data) if (is.null(dots$prior)) { dots$prior <- object$prior } else { if (!is.brmsprior(dots$prior)) { stop2("Argument 'prior' needs to be a 'brmsprior' object.") } # update existing priors manually dots$prior <- rbind(dots$prior, object$prior) dupl_priors <- duplicated(dots$prior[, rcols_prior()]) dots$prior <- dots$prior[!dupl_priors, ] } # make sure potentially updated priors pass 'validate_prior' attr(dots$prior, "allow_invalid_prior") <- TRUE if (!"sample_prior" %in% names(dots)) { dots$sample_prior <- attr(object$prior, "sample_prior") if (is.null(dots$sample_prior)) { has_prior_pars <- any(grepl("^prior_", variables(object))) dots$sample_prior <- if (has_prior_pars) "yes" else "no" } } # do not use 'is.null' to allow updating arguments to NULL if (!"data2" %in% names(dots)) { dots$data2 <- object$data2 } if (!"stanvars" %in% names(dots)) { dots$stanvars <- object$stanvars } if (!"algorithm" %in% names(dots)) { dots$algorithm <- object$algorithm } if (!"backend" %in% names(dots)) { dots$backend <- object$backend } if (!"threads" %in% names(dots)) { dots$threads <- object$threads } if (!"save_pars" %in% names(dots)) { dots$save_pars <- object$save_pars } if (!"knots" %in% names(dots)) { dots$knots <- attr(object$data, "knots") } if (!"normalize" %in% names(dots)) { dots$normalize <- is_normalized(object$model) } # update arguments controlling the sampling process if (is.null(dots$iter)) { # only keep old 'warmup' if also keeping old 'iter' dots$warmup <- first_not_null(dots$warmup, object$fit@sim$warmup) } dots$iter <- first_not_null(dots$iter, object$fit@sim$iter) dots$chains <- first_not_null(dots$chains, object$fit@sim$chains) dots$thin <- first_not_null(dots$thin, object$fit@sim$thin) control <- attr(object$fit@sim$samples[[1]], "args")$control control <- control[setdiff(names(control), names(dots$control))] dots$control[names(control)] <- control if (is.null(recompile)) { dots$backend <- match.arg(dots$backend, backend_choices()) # only recompile if new and old stan 
code do not match new_stancode <- suppressMessages(do_call(make_stancode, dots)) # stan code may differ just because of the version number (#288) new_stancode <- sub("^[^\n]+\n", "", new_stancode) old_stancode <- stancode(object, version = FALSE) recompile <- needs_recompilation(object) || !is_equal(new_stancode, old_stancode) || !is_equal(dots$backend, object$backend) if (recompile && silent < 2) { message("The desired updates require recompiling the model") } } recompile <- as_one_logical(recompile) if (recompile) { # recompliation is necessary dots$fit <- NA if (!testmode) { object <- do_call(brm, dots) } } else { # refit the model without compiling it again if (!is.null(dots$formula)) { object$formula <- dots$formula dots$formula <- NULL } bterms <- brmsterms(object$formula) object$data <- validate_data(dots$data, bterms = bterms) object$data2 <- validate_data2(dots$data2, bterms = bterms) object$family <- get_element(object$formula, "family") object$autocor <- get_element(object$formula, "autocor") object$ranef <- tidy_ranef(bterms, data = object$data) object$stanvars <- validate_stanvars(dots$stanvars) object$threads <- validate_threads(dots$threads) if ("sample_prior" %in% names(dots)) { dots$sample_prior <- validate_sample_prior(dots$sample_prior) attr(object$prior, "sample_prior") <- dots$sample_prior } object$save_pars <- validate_save_pars( save_pars = dots$save_pars, save_ranef = dots$save_ranef, save_mevars = dots$save_mevars, save_all_pars = dots$save_all_pars ) algorithm <- match.arg(dots$algorithm, algorithm_choices()) dots$algorithm <- object$algorithm <- algorithm # can only avoid recompilation when using the old backend dots$backend <- object$backend if (!testmode) { dots$fit <- object object <- do_call(brm, dots) } } attr(object$data, "data_name") <- data_name object } #' Update \pkg{brms} models based on multiple data sets #' #' This method allows to update an existing \code{brmsfit_multiple} object. #' #' @param object An object of class \code{brmsfit_multiple}. #' @param formula. Changes to the formula; for details see #' \code{\link{update.formula}} and \code{\link{brmsformula}}. #' @param newdata List of \code{data.frames} to update the model with new data. #' Currently required even if the original data should be used. #' @param ... Other arguments passed to \code{\link{update.brmsfit}} #' and \code{\link{brm_multiple}}. #' #' @examples #' \dontrun{ #' library(mice) #' imp <- mice(nhanes2) #' #' # initially fit the model #' fit_imp1 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) #' summary(fit_imp1) #' #' # update the model using fewer predictors #' fit_imp2 <- update(fit_imp1, formula. = . ~ hyp + chl, newdata = imp) #' summary(fit_imp2) #' } #' #' @export update.brmsfit_multiple <- function(object, formula., newdata = NULL, ...) { dots <- list(...) if ("data" %in% names(dots)) { # otherwise the data name cannot be found by substitute stop2("Please use argument 'newdata' to update the data.") } if (is.null(newdata)) { stop2("'newdata' is required when updating a 'brmsfit_multiple' object.") } data_name <- substitute_name(newdata) if (inherits(newdata, "mids")) { require_package("mice", version = "3.0.0") newdata <- lapply(seq_len(newdata$m), mice::complete, data = newdata) } else if (!(is.list(newdata) && is.vector(newdata))) { stop2("'newdata' must be a list of data.frames.") } # update the template model using all arguments if (missing(formula.)) { formula. 
<- NULL } args <- c(nlist(object, formula., newdata = newdata[[1]]), dots) args$file <- NULL args$chains <- 0 fit <- do_call(update.brmsfit, args) # arguments later passed to brm_multiple args <- c(nlist(fit, data = newdata), dots) # update arguments controlling the sampling process # they cannot be accessed directly from the template model # as it does not contain any draws (chains = 0) if (is.null(args$iter)) { # only keep old 'warmup' if also keeping old 'iter' args$warmup <- first_not_null(args$warmup, object$fit@sim$warmup) } if (is.null(args$chains)) { # chains were combined across all submodels args$chains <- object$fit@sim$chains / max(NROW(object$rhats), 1) } args$iter <- first_not_null(args$iter, object$fit@sim$iter) args$thin <- first_not_null(args$thin, object$fit@sim$thin) control <- attr(object$fit@sim$samples[[1]], "args")$control control <- control[setdiff(names(control), names(args$control))] args$control[names(control)] <- control args$recompile <- NULL out <- do_call(brm_multiple, args) attr(out$data, "data_name") <- data_name out } brms/R/posterior_predict.R0000644000175000017500000011001514111751666015441 0ustar nileshnilesh#' Draws from the Posterior Predictive Distribution #' #' Compute posterior draws of the posterior predictive distribution. Can be #' performed for the data used to fit the model (posterior predictive checks) or #' for new data. By definition, these draws have higher variance than draws #' of the means of the posterior predictive distribution computed by #' \code{\link{posterior_epred.brmsfit}}. This is because the residual error #' is incorporated in \code{posterior_predict}. However, the estimated means of #' both methods averaged across draws should be very similar. #' #' @inheritParams prepare_predictions #' @param object An object of class \code{brmsfit}. #' @param re.form Alias of \code{re_formula}. #' @param transform (Deprecated) A function or a character string naming #' a function to be applied on the predicted responses #' before summary statistics are computed. #' @param negative_rt Only relevant for Wiener diffusion models. #' A flag indicating whether response times of responses #' on the lower boundary should be returned as negative values. #' This allows to distinguish responses on the upper and #' lower boundary. Defaults to \code{FALSE}. #' @param sort Logical. Only relevant for time series models. #' Indicating whether to return predicted values in the original #' order (\code{FALSE}; default) or in the order of the #' time series (\code{TRUE}). #' @param ntrys Parameter used in rejection sampling #' for truncated discrete models only #' (defaults to \code{5}). See Details for more information. #' @param cores Number of cores (defaults to \code{1}). On non-Windows systems, #' this argument can be set globally via the \code{mc.cores} option. #' @param ... Further arguments passed to \code{\link{prepare_predictions}} #' that control several aspects of data validation and prediction. #' #' @return An \code{array} of predicted response values. In univariate models, #' the output is as an S x N matrix, where S is the number of posterior #' draws and N is the number of observations. In multivariate models, an #' additional dimension is added to the output which indexes along the #' different response variables. 
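#'
#' As a quick orientation (a sketch, assuming a hypothetical fitted univariate
#' model \code{fit}), the dimensions of the returned draws can be inspected
#' directly:
#' \preformatted{
#' pp <- posterior_predict(fit)
#' dim(pp)  # number of posterior draws x number of observations
#' }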
#' #' @template details-newdata-na #' @template details-allow_new_levels #' @details For truncated discrete models only: In the absence of any general #' algorithm to sample from truncated discrete distributions, rejection #' sampling is applied in this special case. This means that values are #' sampled until a value lies within the defined truncation boundaries. In #' practice, this procedure may be rather slow (especially in \R). Thus, we #' try to do approximate rejection sampling by sampling each value #' \code{ntrys} times and then select a valid value. If all values are #' invalid, the closest boundary is used, instead. If there are more than a #' few of these pathological cases, a warning will occur suggesting to #' increase argument \code{ntrys}. #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient), #' data = kidney, family = "exponential", inits = "0") #' #' ## predicted responses #' pp <- posterior_predict(fit) #' str(pp) #' #' ## predicted responses excluding the group-level effect of age #' pp <- posterior_predict(fit, re_formula = ~ (1 | patient)) #' str(pp) #' #' ## predicted responses of patient 1 for new data #' newdata <- data.frame( #' sex = factor(c("male", "female")), #' age = c(20, 50), #' patient = c(1, 1) #' ) #' pp <- posterior_predict(fit, newdata = newdata) #' str(pp) #' } #' #' @aliases posterior_predict #' @method posterior_predict brmsfit #' @importFrom rstantools posterior_predict #' @export #' @export posterior_predict posterior_predict.brmsfit <- function( object, newdata = NULL, re_formula = NULL, re.form = NULL, transform = NULL, resp = NULL, negative_rt = FALSE, ndraws = NULL, draw_ids = NULL, sort = FALSE, ntrys = 5, cores = NULL, ... ) { cl <- match.call() if ("re.form" %in% names(cl)) { re_formula <- re.form } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_predict( prep, transform = transform, sort = sort, ntrys = ntrys, negative_rt = negative_rt, cores = cores, summary = FALSE ) } #' @export posterior_predict.mvbrmsprep <- function(object, ...) { if (length(object$mvpars$rescor)) { object$mvpars$Mu <- get_Mu(object) object$mvpars$Sigma <- get_Sigma(object) out <- posterior_predict.brmsprep(object, ...) } else { out <- lapply(object$resps, posterior_predict, ...) along <- ifelse(length(out) > 1L, 3, 2) out <- do_call(abind, c(out, along = along)) } out } #' @export posterior_predict.brmsprep <- function(object, transform = NULL, sort = FALSE, summary = FALSE, robust = FALSE, probs = c(0.025, 0.975), cores = NULL, ...) { summary <- as_one_logical(summary) cores <- validate_cores_post_processing(cores) if (is.customfamily(object$family)) { # ensure that the method can be found during parallel execution object$family$posterior_predict <- custom_family_method(object$family, "posterior_predict") } for (nlp in names(object$nlpars)) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in names(object$dpars)) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } pp_fun <- paste0("posterior_predict_", object$family$fun) pp_fun <- get(pp_fun, asNamespace("brms")) N <- choose_N(object) out <- plapply(seq_len(N), pp_fun, cores = cores, prep = object, ...) 
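# 'out' is a list with one element per observation; the code below binds it
# into an S x N matrix, or into an S x N x C array (one slice per response
# variable or category) for multivariate and multi-column families.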
if (grepl("_mv$", object$family$fun)) { out <- do_call(abind, c(out, along = 3)) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- names(object$resps) } else if (has_multicol(object$family)) { out <- do_call(abind, c(out, along = 3)) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- object$cats } else { out <- do_call(cbind, out) } colnames(out) <- rownames(out) <- NULL if (use_int(object$family)) { out <- check_discrete_trunc_bounds( out, lb = object$data$lb, ub = object$data$ub ) } out <- reorder_obs(out, object$old_order, sort = sort) # transform predicted response draws before summarizing them if (!is.null(transform)) { # deprecated as of brms 2.12.3 warning2("Argument 'transform' is deprecated ", "and will be removed in the future.") out <- do_call(transform, list(out)) } attr(out, "levels") <- object$cats if (summary) { # only for compatibility with the 'predict' method if (is_ordinal(object$family)) { levels <- seq_len(max(object$data$nthres) + 1) out <- posterior_table(out, levels = levels) } else if (is_categorical(object$family)) { levels <- seq_len(object$data$ncat) out <- posterior_table(out, levels = levels) } else { out <- posterior_summary(out, probs = probs, robust = robust) } } out } #' Draws from the Posterior Predictive Distribution #' #' This method is an alias of \code{\link{posterior_predict.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. #' #' @inheritParams posterior_predict.brmsfit #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predicted response values. #' If \code{summary = FALSE} the output resembles those of #' \code{\link{posterior_predict.brmsfit}}. #' #' If \code{summary = TRUE} the output depends on the family: For categorical #' and ordinal families, the output is an N x C matrix, where N is the number #' of observations, C is the number of categories, and the values are #' predicted category probabilities. For all other families, the output is a N #' x E matrix where E = \code{2 + length(probs)} is the number of summary #' statistics: The \code{Estimate} column contains point estimates (either #' mean or median depending on argument \code{robust}), while the #' \code{Est.Error} column contains uncertainty estimates (either standard #' deviation or median absolute deviation depending on argument #' \code{robust}). The remaining columns starting with \code{Q} contain #' quantile estimates as specified via argument \code{probs}. 
#'
#' @seealso \code{\link{posterior_predict.brmsfit}}
#'
#' @examples
#' \dontrun{
#' ## fit a model
#' fit <- brm(time | cens(censored) ~ age + sex + (1 + age || patient),
#'            data = kidney, family = "exponential", inits = "0")
#'
#' ## predicted responses
#' pp <- predict(fit)
#' head(pp)
#'
#' ## predicted responses excluding the group-level effect of age
#' pp <- predict(fit, re_formula = ~ (1 | patient))
#' head(pp)
#'
#' ## predicted responses of patient 1 for new data
#' newdata <- data.frame(
#'   sex = factor(c("male", "female")),
#'   age = c(20, 50),
#'   patient = c(1, 1)
#' )
#' predict(fit, newdata = newdata)
#' }
#'
#' @export
predict.brmsfit <- function(object, newdata = NULL, re_formula = NULL,
                            transform = NULL, resp = NULL,
                            negative_rt = FALSE, ndraws = NULL,
                            draw_ids = NULL, sort = FALSE, ntrys = 5,
                            cores = NULL, summary = TRUE, robust = FALSE,
                            probs = c(0.025, 0.975), ...) {
  contains_draws(object)
  object <- restructure(object)
  prep <- prepare_predictions(
    object, newdata = newdata, re_formula = re_formula, resp = resp,
    ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ...
  )
  posterior_predict(
    prep, transform = transform, ntrys = ntrys, negative_rt = negative_rt,
    sort = sort, cores = cores, summary = summary, robust = robust,
    probs = probs
  )
}

#' Predictive Intervals
#'
#' Compute intervals from the posterior predictive distribution.
#'
#' @aliases predictive_interval
#'
#' @param object An \R object of class \code{brmsfit}.
#' @param prob A number p (0 < p < 1) indicating the desired probability mass to
#'   include in the intervals. Defaults to \code{0.9}.
#' @param ... Further arguments passed to \code{\link{posterior_predict}}.
#'
#' @return A matrix with 2 columns for the lower and upper bounds of the
#'   intervals, respectively, and as many rows as observations being predicted.
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zBase, data = epilepsy, family = poisson())
#' predictive_interval(fit)
#' }
#'
#' @importFrom rstantools predictive_interval
#' @export predictive_interval
#' @export
predictive_interval.brmsfit <- function(object, prob = 0.9, ...) {
  out <- posterior_predict(object, ...)
  predictive_interval(out, prob = prob)
}

# validate method name to obtain posterior predictions
# @param method name of the method
# @return validated name of the method
validate_pp_method <- function(method) {
  method <- as_one_character(method)
  if (method %in% c("posterior_predict", "predict", "pp")) {
    method <- "posterior_predict"
  } else if (method %in% c("posterior_epred", "fitted", "pp_expect")) {
    method <- "posterior_epred"
  } else if (method %in% c("posterior_linpred")) {
    method <- "posterior_linpred"
  } else if (method %in% c("predictive_error", "residuals")) {
    method <- "predictive_error"
  } else {
    stop2("Posterior predictive method '", method, "' is not supported.")
  }
  method
}

# ------------------- family specific posterior_predict methods ---------------------
# All posterior_predict_ functions have the same argument structure
# @param i index of the observation for which to compute pp values
# @param prep A named list returned by prepare_predictions containing
#   all required data and posterior draws
# @param ... ignored arguments
# @return A vector of length prep$ndraws containing draws
#   from the posterior predictive distribution
posterior_predict_gaussian <- function(i, prep, ntrys = 5, ...)
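# Draws one response per posterior draw from a normal distribution with
# draw-specific mean and scale; optional truncation bounds are handled by
# rcontinuous() via CDF inversion. Illustrative sketch only (not part of
# brms; assumes 'fit' is an already fitted brmsfit object):
#   prep <- prepare_predictions(fit)
#   yrep1 <- brms:::posterior_predict_gaussian(i = 1, prep = prep)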
{ mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) rcontinuous( n = prep$ndraws, dist = "norm", mean = mu, sd = sigma, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_student <- function(i, prep, ntrys = 5, ...) { nu <- get_dpar(prep, "nu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) rcontinuous( n = prep$ndraws, dist = "student_t", df = nu, mu = mu, sigma = sigma, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_lognormal <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "lnorm", meanlog = get_dpar(prep, "mu", i = i), sdlog = get_dpar(prep, "sigma", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_shifted_lognormal <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "shifted_lnorm", meanlog = get_dpar(prep, "mu", i = i), sdlog = get_dpar(prep, "sigma", i = i), shift = get_dpar(prep, "ndt", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_skew_normal <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) alpha <- get_dpar(prep, "alpha", i = i) rcontinuous( n = prep$ndraws, dist = "skew_normal", mu = mu, sigma = sigma, alpha = alpha, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gaussian_mv <- function(i, prep, ...) { Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) .predict <- function(s) { rmulti_normal(1, mu = Mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_mv <- function(i, prep, ...) { nu <- get_dpar(prep, "nu", i = i) Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) .predict <- function(s) { rmulti_student_t(1, df = nu[s], mu = Mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_time <- function(i, prep, ...) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs) .predict <- function(s) { rmulti_normal(1, mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_time <- function(i, prep, ...) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) nu <- as.matrix(get_dpar(prep, "nu", i = obs)) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs) .predict <- function(s) { rmulti_student_t(1, df = nu[s, ], mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_lagsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$lagsar[s] * ac$Msar) mu <- as.numeric(solve(M_new) %*% mu[s, ]) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_normal(1, mu = mu, Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_lagsar <- function(i, prep, ...) 
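# SAR models are predicted jointly for all observations (hence the
# stopifnot(i == 1) below): each draw uses the transformed mean
# solve(I - lagsar * M, mu) and covariance solve(crossprod(I - lagsar * M)) * sigma^2,
# mirroring posterior_predict_gaussian_lagsar() above but with Student-t noise.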
{ stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$lagsar[s] * ac$Msar) mu <- as.numeric(solve(M_new) %*% mu[s, ]) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_student_t(1, df = nu[s], mu = mu, Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") nu <- get_dpar(prep, "nu") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_errorsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$errorsar[s] * ac$Msar) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_normal(1, mu = mu[s, ], Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_errorsar <- function(i, prep, ...) { stopifnot(i == 1) .predict <- function(s) { M_new <- with(prep, diag(nobs) - ac$errorsar[s] * ac$Msar) Sigma <- solve(crossprod(M_new)) * sigma[s]^2 rmulti_student_t(1, df = nu[s], mu = mu[s, ], Sigma = Sigma) } mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") nu <- get_dpar(prep, "nu") rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_gaussian_fcor <- function(i, prep, ...) { stopifnot(i == 1) mu <- as.matrix(get_dpar(prep, "mu")) Sigma <- get_cov_matrix_ac(prep) .predict <- function(s) { rmulti_normal(1, mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_student_fcor <- function(i, prep, ...) { stopifnot(i == 1) nu <- as.matrix(get_dpar(prep, "nu")) mu <- as.matrix(get_dpar(prep, "mu")) Sigma <- get_cov_matrix_ac(prep) .predict <- function(s) { rmulti_student_t(1, df = nu[s, ], mu = mu[s, ], Sigma = Sigma[s, , ]) } rblapply(seq_len(prep$ndraws), .predict) } posterior_predict_binomial <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "binom", size = prep$data$trials[i], prob = get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_bernoulli <- function(i, prep, ...) { mu <- get_dpar(prep, "mu", i = i) rbinom(length(mu), size = 1, prob = mu) } posterior_predict_poisson <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) rdiscrete( n = prep$ndraws, dist = "pois", lambda = mu, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_negbinomial <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- get_dpar(prep, "shape", i) shape <- multiply_dpar_rate_denom(shape, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_negbinomial2 <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) sigma <- get_dpar(prep, "sigma", i) shape <- multiply_dpar_rate_denom(1 / sigma, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_geometric <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- 1 shape <- multiply_dpar_rate_denom(shape, prep, i = i) rdiscrete( n = prep$ndraws, dist = "nbinom", mu = mu, size = shape, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_discrete_weibull <- function(i, prep, ntrys = 5, ...) 
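# Note: the Poisson, negative binomial, and geometric families above fold
# rate (exposure) terms into 'mu' and 'shape' via multiply_dpar_rate_denom()
# before drawing; discrete_weibull below draws directly with rdiscrete(),
# which also enforces optional truncation bounds by rejection sampling.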
{ rdiscrete( n = prep$ndraws, dist = "discrete_weibull", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_com_poisson <- function(i, prep, ntrys = 5, ...) { rdiscrete( n = prep$ndraws, dist = "com_poisson", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_exponential <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "exp", rate = 1 / get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gamma <- function(i, prep, ntrys = 5, ...) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / shape rcontinuous( n = prep$ndraws, dist = "gamma", shape = shape, scale = scale, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_weibull <- function(i, prep, ntrys = 5, ...) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 + 1 / shape) rcontinuous( n = prep$ndraws, dist = "weibull", shape = shape, scale = scale, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_frechet <- function(i, prep, ntrys = 5, ...) { nu <- get_dpar(prep, "nu", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 - 1 / nu) rcontinuous( n = prep$ndraws, dist = "frechet", scale = scale, shape = nu, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_gen_extreme_value <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "gen_extreme_value", sigma = get_dpar(prep, "sigma", i = i), xi = get_dpar(prep, "xi", i = i), mu = get_dpar(prep, "mu", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_inverse.gaussian <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "inv_gaussian", mu = get_dpar(prep, "mu", i = i), shape = get_dpar(prep, "shape", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_exgaussian <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "exgaussian", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), beta = get_dpar(prep, "beta", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_wiener <- function(i, prep, negative_rt = FALSE, ntrys = 5, ...) { out <- rcontinuous( n = 1, dist = "wiener", delta = get_dpar(prep, "mu", i = i), alpha = get_dpar(prep, "bs", i = i), tau = get_dpar(prep, "ndt", i = i), beta = get_dpar(prep, "bias", i = i), types = if (negative_rt) c("q", "resp") else "q", lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) if (negative_rt) { # code lower bound responses as negative RTs out <- out[["q"]] * ifelse(out[["resp"]], 1, -1) } out } posterior_predict_beta <- function(i, prep, ntrys = 5, ...) { mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) rcontinuous( n = prep$ndraws, dist = "beta", shape1 = mu * phi, shape2 = (1 - mu) * phi, lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_von_mises <- function(i, prep, ntrys = 5, ...) { rcontinuous( n = prep$ndraws, dist = "von_mises", mu = get_dpar(prep, "mu", i = i), kappa = get_dpar(prep, "kappa", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_asym_laplace <- function(i, prep, ntrys = 5, ...) 
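# Note: several continuous families above convert brms' mean parameterization
# to the standard distribution parameterization before sampling, e.g. the
# Weibull scale is mu / gamma(1 + 1 / shape), the Gamma scale is mu / shape,
# and the Frechet scale is mu / gamma(1 - 1 / nu).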
{ rcontinuous( n = prep$ndraws, dist = "asym_laplace", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), quantile = get_dpar(prep, "quantile", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) } posterior_predict_zero_inflated_asym_laplace <- function(i, prep, ntrys = 5, ...) { zi <- get_dpar(prep, "zi", i = i) tmp <- runif(prep$ndraws, 0, 1) ifelse( tmp < zi, 0, rcontinuous( n = prep$ndraws, dist = "asym_laplace", mu = get_dpar(prep, "mu", i = i), sigma = get_dpar(prep, "sigma", i = i), quantile = get_dpar(prep, "quantile", i = i), lb = prep$data$lb[i], ub = prep$data$ub[i], ntrys = ntrys ) ) } posterior_predict_cox <- function(i, prep, ...) { stop2("Cannot sample from the posterior predictive ", "distribution for family 'cox'.") } posterior_predict_hurdle_poisson <- function(i, prep, ...) { # theta is the bernoulli hurdle parameter theta <- get_dpar(prep, "hu", i = i) lambda <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the hurdle process hu <- runif(ndraws, 0, 1) # sample from a truncated poisson distribution # by adjusting lambda and adding 1 t = -log(1 - runif(ndraws) * (1 - exp(-lambda))) ifelse(hu < theta, 0, rpois(ndraws, lambda = lambda - t) + 1) } posterior_predict_hurdle_negbinomial <- function(i, prep, ...) { # theta is the bernoulli hurdle parameter theta <- get_dpar(prep, "hu", i = i) mu <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the hurdle process hu <- runif(ndraws, 0, 1) # sample from an approximate(!) truncated negbinomial distribution # by adjusting mu and adding 1 t = -log(1 - runif(ndraws) * (1 - exp(-mu))) shape <- get_dpar(prep, "shape", i = i) ifelse(hu < theta, 0, rnbinom(ndraws, mu = mu - t, size = shape) + 1) } posterior_predict_hurdle_gamma <- function(i, prep, ...) { # theta is the bernoulli hurdle parameter theta <- get_dpar(prep, "hu", i = i) shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / shape ndraws <- prep$ndraws # compare with theta to incorporate the hurdle process hu <- runif(ndraws, 0, 1) ifelse(hu < theta, 0, rgamma(ndraws, shape = shape, scale = scale)) } posterior_predict_hurdle_lognormal <- function(i, prep, ...) { # theta is the bernoulli hurdle parameter theta <- get_dpar(prep, "hu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the hurdle process hu <- runif(ndraws, 0, 1) ifelse(hu < theta, 0, rlnorm(ndraws, meanlog = mu, sdlog = sigma)) } posterior_predict_zero_inflated_beta <- function(i, prep, ...) { # theta is the bernoulli hurdle parameter theta <- get_dpar(prep, "zi", i = i) mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) # compare with theta to incorporate the hurdle process hu <- runif(prep$ndraws, 0, 1) ifelse( hu < theta, 0, rbeta(prep$ndraws, shape1 = mu * phi, shape2 = (1 - mu) * phi) ) } posterior_predict_zero_one_inflated_beta <- function(i, prep, ...) { zoi <- get_dpar(prep, "zoi", i) coi <- get_dpar(prep, "coi", i) mu <- get_dpar(prep, "mu", i = i) phi <- get_dpar(prep, "phi", i = i) hu <- runif(prep$ndraws, 0, 1) one_or_zero <- runif(prep$ndraws, 0, 1) ifelse(hu < zoi, ifelse(one_or_zero < coi, 1, 0), rbeta(prep$ndraws, shape1 = mu * phi, shape2 = (1 - mu) * phi) ) } posterior_predict_zero_inflated_poisson <- function(i, prep, ...) 
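# Zero-inflated families are sampled as a two-part mixture: with probability
# 'zi' a structural zero is produced, otherwise a draw from the base
# distribution is returned. Minimal standalone sketch of the same idea
# (hypothetical values, not brms code):
#   zi <- 0.3; lambda <- 2
#   y <- ifelse(runif(1000) < zi, 0, rpois(1000, lambda))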
{ # theta is the bernoulli zero-inflation parameter theta <- get_dpar(prep, "zi", i = i) lambda <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the zero-inflation process zi <- runif(ndraws, 0, 1) ifelse(zi < theta, 0, rpois(ndraws, lambda = lambda)) } posterior_predict_zero_inflated_negbinomial <- function(i, prep, ...) { # theta is the bernoulli zero-inflation parameter theta <- get_dpar(prep, "zi", i = i) mu <- get_dpar(prep, "mu", i = i) shape <- get_dpar(prep, "shape", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the zero-inflation process zi <- runif(ndraws, 0, 1) ifelse(zi < theta, 0, rnbinom(ndraws, mu = mu, size = shape)) } posterior_predict_zero_inflated_binomial <- function(i, prep, ...) { # theta is the bernoulii zero-inflation parameter theta <- get_dpar(prep, "zi", i = i) trials <- prep$data$trials[i] prob <- get_dpar(prep, "mu", i = i) ndraws <- prep$ndraws # compare with theta to incorporate the zero-inflation process zi <- runif(ndraws, 0, 1) ifelse(zi < theta, 0, rbinom(ndraws, size = trials, prob = prob)) } posterior_predict_categorical <- function(i, prep, ...) { eta <- cblapply(names(prep$dpars), get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) p <- pcategorical(seq_len(prep$data$ncat), eta = eta) first_greater(p, target = runif(prep$ndraws, min = 0, max = 1)) } posterior_predict_multinomial <- function(i, prep, ...) { eta <- cblapply(names(prep$dpars), get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) p <- dcategorical(seq_len(prep$data$ncat), eta = eta) size <- prep$data$trials[i] out <- lapply(seq_rows(p), function(s) t(rmultinom(1, size, p[s, ]))) do_call(rbind, out) } posterior_predict_dirichlet <- function(i, prep, ...) { mu_dpars <- str_subset(names(prep$dpars), "^mu") eta <- cblapply(mu_dpars, get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) phi <- get_dpar(prep, "phi", i = i) cats <- seq_len(prep$data$ncat) alpha <- dcategorical(cats, eta = eta) * phi rdirichlet(prep$ndraws, alpha = alpha) } posterior_predict_dirichlet2 <- function(i, prep, ...) { mu_dpars <- str_subset(names(prep$dpars), "^mu") mu <- cblapply(mu_dpars, get_dpar, prep = prep, i = i) rdirichlet(prep$ndraws, alpha = mu) } posterior_predict_cumulative <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_sratio <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_cratio <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_acat <- function(i, prep, ...) { posterior_predict_ordinal(i = i, prep = prep) } posterior_predict_ordinal <- function(i, prep, ...) { thres <- subset_thres(prep, i) nthres <- NCOL(thres) p <- pordinal( seq_len(nthres + 1), eta = get_dpar(prep, "mu", i = i), disc = get_dpar(prep, "disc", i = i), thres = thres, family = prep$family$family, link = prep$family$link ) first_greater(p, target = runif(prep$ndraws, min = 0, max = 1)) } posterior_predict_custom <- function(i, prep, ...) { custom_family_method(prep$family, "posterior_predict")(i, prep, ...) } posterior_predict_mixture <- function(i, prep, ...) 
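# Mixture predictions: for each posterior draw, a mixture component is first
# sampled from the mixing weights theta (see sample_mixture_ids() below) and
# the response is then drawn from that component's posterior_predict_<family>
# function, using a pseudo prep object restricted to the selected draws.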
{ families <- family_names(prep$family) theta <- get_theta(prep, i = i) smix <- sample_mixture_ids(theta) out <- rep(NA, prep$ndraws) for (j in seq_along(families)) { draw_ids <- which(smix == j) if (length(draw_ids)) { pp_fun <- paste0("posterior_predict_", families[j]) pp_fun <- get(pp_fun, asNamespace("brms")) tmp_prep <- pseudo_prep_for_mixture(prep, j, draw_ids) out[draw_ids] <- pp_fun(i, tmp_prep, ...) } } out } # ------------ predict helper-functions ---------------------- # random numbers from (possibly truncated) continuous distributions # @param n number of random values to generate # @param dist name of a distribution for which the functions # p, q, and r are available # @param ... additional arguments passed to the distribution functions # @param ntrys number of trys in rejection sampling for truncated models # @return vector of random values prep from the distribution rcontinuous <- function(n, dist, ..., lb = NULL, ub = NULL, ntrys = 5) { args <- list(...) if (is.null(lb) && is.null(ub)) { # sample as usual rdist <- paste0("r", dist) out <- do_call(rdist, c(list(n), args)) } else { # sample from truncated distribution pdist <- paste0("p", dist) qdist <- paste0("q", dist) if (!exists(pdist, mode = "function") || !exists(qdist, mode = "function")) { # use rejection sampling as CDF or quantile function are not available out <- rdiscrete(n, dist, ..., lb = lb, ub = ub, ntrys = ntrys) } else { if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf plb <- do_call(pdist, c(list(lb), args)) pub <- do_call(pdist, c(list(ub), args)) out <- runif(n, min = plb, max = pub) out <- do_call(qdist, c(list(out), args)) # infinite values may be caused by numerical imprecision out[out %in% c(-Inf, Inf)] <- NA } } out } # random numbers from (possibly truncated) discrete distributions # currently rejection sampling is used for truncated distributions # @param n number of random values to generate # @param dist name of a distribution for which the functions # p, q, and r are available # @param ... additional arguments passed to the distribution functions # @param lb optional lower truncation bound # @param ub optional upper truncation bound # @param ntrys number of trys in rejection sampling for truncated models # @return a vector of random values draws from the distribution rdiscrete <- function(n, dist, ..., lb = NULL, ub = NULL, ntrys = 5) { args <- list(...) 
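# For truncated distributions, rejection sampling is used below: 'ntrys'
# candidate vectors are generated and, for each posterior draw, the first
# candidate inside [lb, ub] is kept (see extract_valid_sample()); if none is
# valid, the closest boundary is used instead.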
rdist <- paste0("r", dist) if (is.null(lb) && is.null(ub)) { # sample as usual out <- do_call(rdist, c(list(n), args)) } else { # sample from truncated distribution via rejection sampling if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf out <- vector("list", ntrys) for (i in seq_along(out)) { # loop of the trys to prevent a mismatch between 'n' # and length of the parameter vectors passed as arguments out[[i]] <- as.vector(do_call(rdist, c(list(n), args))) } out <- do_call(cbind, out) out <- apply(out, 1, extract_valid_sample, lb = lb, ub = ub) } out } # sample from the IDs of the mixture components sample_mixture_ids <- function(theta) { stopifnot(is.matrix(theta)) mix_comp <- seq_cols(theta) ulapply(seq_rows(theta), function(s) sample(mix_comp, 1, prob = theta[s, ]) ) } # extract the first valid predicted value per Stan sample per observation # @param x draws to be check against truncation boundaries # @param lb vector of lower bounds # @param ub vector of upper bound # @return a valid truncated sample or else the closest boundary extract_valid_sample <- function(x, lb, ub) { valid <- match(TRUE, x >= lb & x <= ub) if (is.na(valid)) { # no valid truncated value found # set sample to lb or ub # 1e-10 is only to identify the invalid draws later on out <- ifelse(max(x) < lb, lb - 1e-10, ub + 1e-10) } else { out <- x[valid] } out } # check for invalid predictions of truncated discrete models # @param x matrix of predicted values # @param lb optional lower truncation bound # @param ub optional upper truncation bound # @param thres threshold (in %) of invalid values at which to warn the user # @return rounded values of 'x' check_discrete_trunc_bounds <- function(x, lb = NULL, ub = NULL, thres = 0.01) { if (is.null(lb) && is.null(ub)) { return(x) } if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf thres <- as_one_numeric(thres) # ensure correct comparison with vector bounds y <- as.vector(t(x)) pct_invalid <- mean(y < lb | y > ub, na.rm = TRUE) if (pct_invalid >= thres) { warning2( round(pct_invalid * 100), "% of all predicted values ", "were invalid. Increasing argument 'ntrys' may help." ) } round(x) } brms/R/log_lik.R0000644000175000017500000010354414111751666013332 0ustar nileshnilesh#' Compute the Pointwise Log-Likelihood #' #' @aliases log_lik logLik.brmsfit #' #' @param object A fitted model object of class \code{brmsfit}. #' @inheritParams posterior_predict.brmsfit #' @param combine Only relevant in multivariate models. #' Indicates if the log-likelihoods of the submodels should #' be combined per observation (i.e. added together; the default) #' or if the log-likelihoods should be returned separately. #' @param pointwise A flag indicating whether to compute the full #' log-likelihood matrix at once (the default), or just return #' the likelihood function along with all data and draws #' required to compute the log-likelihood separately for each #' observation. The latter option is rarely useful when #' calling \code{log_lik} directly, but rather when computing #' \code{\link{waic}} or \code{\link{loo}}. #' @param add_point_estimate For internal use only. Ensures compatibility #' with the \code{\link{loo_subsample}} method. #' #' @return Usually, an S x N matrix containing the pointwise log-likelihood #' draws, where S is the number of draws and N is the number #' of observations in the data. For multivariate models and if #' \code{combine} is \code{FALSE}, an S x N x R array is returned, #' where R is the number of response variables. 
#' If \code{pointwise = TRUE}, the output is a function #' with a \code{draws} attribute containing all relevant #' data and posterior draws. #' #' @template details-newdata-na #' @template details-allow_new_levels #' #' @aliases log_lik #' @method log_lik brmsfit #' @export #' @export log_lik #' @importFrom rstantools log_lik log_lik.brmsfit <- function(object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, add_point_estimate = FALSE, cores = NULL, ...) { pointwise <- as_one_logical(pointwise) combine <- as_one_logical(combine) add_point_estimate <- as_one_logical(add_point_estimate) contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = TRUE, ... ) if (add_point_estimate) { # required for the loo_subsample method # Computing a point estimate based on the full prep object is too # difficult due to its highly nested structure. As an alternative, a second # prep object is created from the point estimates of the draws directly. attr(prep, "point_estimate") <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = TRUE, point_estimate = "median", ... ) } if (pointwise) { stopifnot(combine) log_lik <- log_lik_pointwise # names need to be 'data' and 'draws' as per ?loo::loo.function attr(log_lik, "data") <- data.frame(i = seq_len(choose_N(prep))) attr(log_lik, "draws") <- prep } else { log_lik <- log_lik(prep, combine = combine, cores = cores) if (anyNA(log_lik)) { warning2( "NAs were found in the log-likelihood. Possibly this is because ", "some of your responses contain NAs. If you use 'mi' terms, try ", "setting 'resp' to those response variables without missing values. ", "Alternatively, use 'newdata' to predict only complete cases." ) } } log_lik } #' @export logLik.brmsfit <- function(object, newdata = NULL, re_formula = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, pointwise = FALSE, combine = TRUE, cores = NULL, ...) { cl <- match.call() cl[[1]] <- quote(log_lik) eval(cl, parent.frame()) } #' @export log_lik.mvbrmsprep <- function(object, combine = TRUE, ...) { if (length(object$mvpars$rescor)) { object$mvpars$Mu <- get_Mu(object) object$mvpars$Sigma <- get_Sigma(object) out <- log_lik.brmsprep(object, ...) } else { out <- lapply(object$resps, log_lik, ...) if (combine) { out <- Reduce("+", out) } else { along <- ifelse(length(out) > 1L, 3, 2) out <- do_call(abind, c(out, along = along)) } } out } #' @export log_lik.brmsprep <- function(object, cores = NULL, ...) 
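# Computes the S x N pointwise log-likelihood matrix by dispatching to the
# family-specific log_lik_<family> functions (optionally in parallel via
# 'cores') and reordering observations back to their original order.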
{ cores <- validate_cores_post_processing(cores) log_lik_fun <- paste0("log_lik_", object$family$fun) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) if (is.customfamily(object$family)) { # ensure that the method can be found during parallel execution object$family$log_lik <- custom_family_method(object$family, "log_lik") } for (nlp in names(object$nlpars)) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in names(object$dpars)) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } N <- choose_N(object) out <- plapply(seq_len(N), log_lik_fun, cores = cores, prep = object) out <- do_call(cbind, out) colnames(out) <- NULL old_order <- object$old_order sort <- isTRUE(ncol(out) != length(old_order)) reorder_obs(out, old_order, sort = sort) } # evaluate log_lik in a pointwise manner # cannot be an S3 method since 'data_i' must be the first argument # names must be 'data_i' and 'draws' as per ?loo::loo.function log_lik_pointwise <- function(data_i, draws, ...) { i <- data_i$i if (is.mvbrmsprep(draws) && !length(draws$mvpars$rescor)) { out <- lapply(draws$resps, log_lik_pointwise, i = i) out <- Reduce("+", out) } else { log_lik_fun <- paste0("log_lik_", draws$family$fun) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) out <- log_lik_fun(i, draws) } out } # All log_lik_ functions have the same arguments structure # @param i index of the observatio for which to compute log-lik values # @param prep A named list returned by prepare_predictions containing # all required data and posterior draws # @return a vector of length prep$ndraws containing the pointwise # log-likelihood for the ith observation log_lik_gaussian <- function(i, prep) { mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) args <- list(mean = mu, sd = sigma) # log_lik_censor computes the conventional log_lik in case of no censoring out <- log_lik_censor(dist = "norm", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pnorm, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_student <- function(i, prep) { nu <- get_dpar(prep, "nu", i = i) mu <- get_dpar(prep, "mu", i = i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) args <- list(df = nu, mu = mu, sigma = sigma) out <- log_lik_censor( dist = "student_t", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pstudent_t, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_lognormal <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) args <- list(meanlog = get_dpar(prep, "mu", i), sdlog = sigma) out <- log_lik_censor(dist = "lnorm", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = plnorm, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_shifted_lognormal <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) ndt <- get_dpar(prep, "ndt", i = i) args <- list(meanlog = get_dpar(prep, "mu", i), sdlog = sigma, shift = ndt) out <- log_lik_censor("shifted_lnorm", args, i = i, prep = prep) out <- log_lik_truncate(out, pshifted_lnorm, args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_skew_normal <- function(i, prep) { mu <- get_dpar(prep, "mu", i) sigma <- get_dpar(prep, "sigma", i = i) sigma <- add_sigma_se(sigma, prep, i = i) alpha <- get_dpar(prep, "alpha", i = i) args <- nlist(mu, sigma, alpha) out <- log_lik_censor( dist = "skew_normal", args = args, i = i, prep = prep ) 
out <- log_lik_truncate( out, cdf = pskew_normal, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gaussian_mv <- function(i, prep) { Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) dmn <- function(s) { dmulti_normal( prep$data$Y[i, ], mu = Mu[s, ], Sigma = Sigma[s, , ], log = TRUE ) } out <- sapply(1:prep$ndraws, dmn) log_lik_weight(out, i = i, prep = prep) } log_lik_student_mv <- function(i, prep) { nu <- get_dpar(prep, "nu", i = i) Mu <- get_Mu(prep, i = i) Sigma <- get_Sigma(prep, i = i) dmst <- function(s) { dmulti_student_t( prep$data$Y[i, ], df = nu[s], mu = Mu[s, ], Sigma = Sigma[s, , ], log = TRUE ) } out <- sapply(1:prep$ndraws, dmst) log_lik_weight(out, i = i, prep = prep) } log_lik_gaussian_time <- function(i, prep) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Y <- as.numeric(prep$data$Y[obs]) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs) .log_lik <- function(s) { C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_time <- function(i, prep) { obs <- with(prep$ac, begin_tg[i]:end_tg[i]) Y <- as.numeric(prep$data$Y[obs]) nu <- as.matrix(get_dpar(prep, "nu", i = obs)) mu <- as.matrix(get_dpar(prep, "mu", i = obs)) Sigma <- get_cov_matrix_ac(prep, obs) .log_lik <- function(s) { df <- nu[s, ] C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_lagsar <- function(i, prep) { mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) stopifnot(i == 1) # see http://mc-stan.org/loo/articles/loo2-non-factorizable.html .log_lik <- function(s) { IB <- I - with(prep$ac, lagsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - solve(IB, mu[s, ]) g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_lagsar <- function(i, prep) { nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) stopifnot(i == 1) # see http://mc-stan.org/loo/articles/loo2-non-factorizable.html .log_lik <- function(s) { df <- nu[s] IB <- I - with(prep$ac, lagsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - solve(IB, mu[s, ]) g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_errorsar <- function(i, prep) { stopifnot(i == 1) mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) .log_lik <- function(s) { IB <- I - with(prep$ac, errorsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - mu[s, ] g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } 
rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_errorsar <- function(i, prep) { stopifnot(i == 1) nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") sigma <- get_dpar(prep, "sigma") Y <- as.numeric(prep$data$Y) I <- diag(prep$nobs) .log_lik <- function(s) { df <- nu[s] IB <- I - with(prep$ac, errorsar[s, ] * Msar) Cinv <- t(IB) %*% IB / sigma[s]^2 e <- Y - mu[s, ] g <- Cinv %*% e cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_gaussian_fcor <- function(i, prep) { stopifnot(i == 1) Y <- as.numeric(prep$data$Y) mu <- get_dpar(prep, "mu") Sigma <- get_cov_matrix_ac(prep) .log_lik <- function(s) { C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar) ll <- dnorm(Y, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_student_fcor <- function(i, prep) { stopifnot(i == 1) Y <- as.numeric(prep$data$Y) nu <- get_dpar(prep, "nu") mu <- get_dpar(prep, "mu") Sigma <- get_cov_matrix_ac(prep) .log_lik <- function(s) { df <- nu[s] C <- as.matrix(Sigma[s, , ]) Cinv <- solve(C) e <- Y - mu[s, ] g <- solve(C, e) cbar <- diag(Cinv) yloo <- Y - g / cbar sdloo <- sqrt(1 / cbar * student_t_cov_factor(df, Cinv, e)) dfloo <- df + nrow(Cinv) - 1 ll <- dstudent_t(Y, dfloo, yloo, sdloo, log = TRUE) return(as.numeric(ll)) } rblapply(seq_len(prep$ndraws), .log_lik) } log_lik_binomial <- function(i, prep) { trials <- prep$data$trials[i] args <- list(size = trials, prob = get_dpar(prep, "mu", i)) out <- log_lik_censor( dist = "binom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_bernoulli <- function(i, prep) { args <- list(size = 1, prob = get_dpar(prep, "mu", i)) out <- log_lik_censor( dist = "binom", args = args, i = i, prep = prep ) # no truncation allowed log_lik_weight(out, i = i, prep = prep) } log_lik_poisson <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) args <- list(lambda = mu) out <- log_lik_censor( dist = "pois", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = ppois, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_negbinomial <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- get_dpar(prep, "shape", i) shape <- multiply_dpar_rate_denom(shape, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_negbinomial2 <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) sigma <- get_dpar(prep, "sigma", i) shape <- multiply_dpar_rate_denom(1 / sigma, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_geometric <- function(i, prep) { mu <- get_dpar(prep, "mu", i) mu <- multiply_dpar_rate_denom(mu, prep, i = i) shape <- 1 shape <- 
multiply_dpar_rate_denom(shape, prep, i = i) args <- list(mu = mu, size = shape) out <- log_lik_censor( dist = "nbinom", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pnbinom, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_discrete_weibull <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i) ) out <- log_lik_censor( dist = "discrete_weibull", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pdiscrete_weibull, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_com_poisson <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i) ) # no censoring or truncation allowed yet out <- do_call(dcom_poisson, c(prep$data$Y[i], args, log = TRUE)) log_lik_weight(out, i = i, prep = prep) } log_lik_exponential <- function(i, prep) { args <- list(rate = 1 / get_dpar(prep, "mu", i)) out <- log_lik_censor(dist = "exp", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pexp, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gamma <- function(i, prep) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i) / shape args <- nlist(shape, scale) out <- log_lik_censor(dist = "gamma", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pgamma, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_weibull <- function(i, prep) { shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 + 1 / shape) args <- list(shape = shape, scale = scale) out <- log_lik_censor( dist = "weibull", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pweibull, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_frechet <- function(i, prep) { nu <- get_dpar(prep, "nu", i = i) scale <- get_dpar(prep, "mu", i = i) / gamma(1 - 1 / nu) args <- list(scale = scale, shape = nu) out <- log_lik_censor( dist = "frechet", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pfrechet, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_gen_extreme_value <- function(i, prep) { sigma <- get_dpar(prep, "sigma", i = i) xi <- get_dpar(prep, "xi", i = i) mu <- get_dpar(prep, "mu", i) args <- nlist(mu, sigma, xi) out <- log_lik_censor(dist = "gen_extreme_value", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pgen_extreme_value, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_inverse.gaussian <- function(i, prep) { args <- list(mu = get_dpar(prep, "mu", i), shape = get_dpar(prep, "shape", i = i)) out <- log_lik_censor(dist = "inv_gaussian", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pinv_gaussian, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_exgaussian <- function(i, prep) { args <- list(mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i = i), beta = get_dpar(prep, "beta", i = i)) out <- log_lik_censor(dist = "exgaussian", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pexgaussian, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_wiener <- function(i, prep) { args <- list( delta = get_dpar(prep, "mu", i), alpha = get_dpar(prep, "bs", i = i), tau = get_dpar(prep, "ndt", i = i), beta = get_dpar(prep, "bias", i = i), resp 
= prep$data[["dec"]][i] ) out <- do_call(dwiener, c(prep$data$Y[i], args, log = TRUE)) log_lik_weight(out, i = i, prep = prep) } log_lik_beta <- function(i, prep) { mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) args <- list(shape1 = mu * phi, shape2 = (1 - mu) * phi) out <- log_lik_censor(dist = "beta", args = args, i = i, prep = prep) out <- log_lik_truncate( out, cdf = pbeta, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_von_mises <- function(i, prep) { args <- list( mu = get_dpar(prep, "mu", i), kappa = get_dpar(prep, "kappa", i = i) ) out <- log_lik_censor( dist = "von_mises", args = args, i = i, prep = prep ) out <- log_lik_truncate( out, cdf = pvon_mises, args = args, i = i, prep = prep ) log_lik_weight(out, i = i, prep = prep) } log_lik_asym_laplace <- function(i, prep, ...) { args <- list( mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i), quantile = get_dpar(prep, "quantile", i) ) out <- log_lik_censor(dist = "asym_laplace", args, i, prep) out <- log_lik_truncate(out, pasym_laplace, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_asym_laplace <- function(i, prep, ...) { args <- list( mu = get_dpar(prep, "mu", i), sigma = get_dpar(prep, "sigma", i), quantile = get_dpar(prep, "quantile", i), zi = get_dpar(prep, "zi", i) ) out <- log_lik_censor(dist = "zero_inflated_asym_laplace", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_asym_laplace, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_cox <- function(i, prep, ...) { args <- list( mu = get_dpar(prep, "mu", i), bhaz = prep$bhaz$bhaz[, i], cbhaz = prep$bhaz$cbhaz[, i] ) out <- log_lik_censor(dist = "cox", args = args, i = i, prep = prep) out <- log_lik_truncate(out, cdf = pcox, args = args, i = i, prep = prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_poisson <- function(i, prep) { hu <- get_dpar(prep, "hu", i) lambda <- get_dpar(prep, "mu", i) args <- nlist(lambda, hu) out <- log_lik_censor("hurdle_poisson", args, i, prep) out <- log_lik_truncate(out, phurdle_poisson, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_negbinomial <- function(i, prep) { hu <- get_dpar(prep, "hu", i) mu <- get_dpar(prep, "mu", i) shape <- get_dpar(prep, "shape", i = i) args <- nlist(mu, shape, hu) out <- log_lik_censor("hurdle_negbinomial", args, i, prep) out <- log_lik_truncate(out, phurdle_negbinomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_gamma <- function(i, prep) { hu <- get_dpar(prep, "hu", i) shape <- get_dpar(prep, "shape", i = i) scale <- get_dpar(prep, "mu", i) / shape args <- nlist(shape, scale, hu) out <- log_lik_censor("hurdle_gamma", args, i, prep) out <- log_lik_truncate(out, phurdle_gamma, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_hurdle_lognormal <- function(i, prep) { hu <- get_dpar(prep, "hu", i) mu <- get_dpar(prep, "mu", i) sigma <- get_dpar(prep, "sigma", i = i) args <- nlist(mu, sigma, hu) out <- log_lik_censor("hurdle_lognormal", args, i, prep) out <- log_lik_truncate(out, phurdle_lognormal, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_poisson <- function(i, prep) { zi <- get_dpar(prep, "zi", i) lambda <- get_dpar(prep, "mu", i) args <- nlist(lambda, zi) out <- log_lik_censor("zero_inflated_poisson", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_poisson, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_negbinomial <- function(i, prep) 
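# Zero-inflated and hurdle log-likelihoods reuse the corresponding d<family>
# and p<family> distribution functions through log_lik_censor() and
# log_lik_truncate(), so censoring, truncation, and weights are handled in
# the same way as for ordinary families.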
{ zi <- get_dpar(prep, "zi", i) mu <- get_dpar(prep, "mu", i) shape <- get_dpar(prep, "shape", i = i) args <- nlist(mu, shape, zi) out <- log_lik_censor("zero_inflated_negbinomial", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_negbinomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_binomial <- function(i, prep) { trials <- prep$data$trials[i] mu <- get_dpar(prep, "mu", i) zi <- get_dpar(prep, "zi", i) args <- list(size = trials, prob = mu, zi) out <- log_lik_censor("zero_inflated_binomial", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_binomial, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_inflated_beta <- function(i, prep) { zi <- get_dpar(prep, "zi", i) mu <- get_dpar(prep, "mu", i) phi <- get_dpar(prep, "phi", i) args <- nlist(shape1 = mu * phi, shape2 = (1 - mu) * phi, zi) out <- log_lik_censor("zero_inflated_beta", args, i, prep) out <- log_lik_truncate(out, pzero_inflated_beta, args, i, prep) log_lik_weight(out, i = i, prep = prep) } log_lik_zero_one_inflated_beta <- function(i, prep) { zoi <- get_dpar(prep, "zoi", i) coi <- get_dpar(prep, "coi", i) if (prep$data$Y[i] %in% c(0, 1)) { out <- dbinom(1, size = 1, prob = zoi, log = TRUE) + dbinom(prep$data$Y[i], size = 1, prob = coi, log = TRUE) } else { phi <- get_dpar(prep, "phi", i) mu <- get_dpar(prep, "mu", i) args <- list(shape1 = mu * phi, shape2 = (1 - mu) * phi) out <- dbinom(0, size = 1, prob = zoi, log = TRUE) + do_call(dbeta, c(prep$data$Y[i], args, log = TRUE)) } log_lik_weight(out, i = i, prep = prep) } log_lik_categorical <- function(i, prep) { stopifnot(prep$family$link == "logit") eta <- cblapply(names(prep$dpars), get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) out <- dcategorical(prep$data$Y[i], eta = eta, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_multinomial <- function(i, prep) { stopifnot(prep$family$link == "logit") eta <- cblapply(names(prep$dpars), get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) out <- dmultinomial(prep$data$Y[i, ], eta = eta, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_dirichlet <- function(i, prep) { stopifnot(prep$family$link == "logit") mu_dpars <- str_subset(names(prep$dpars), "^mu") eta <- cblapply(mu_dpars, get_dpar, prep = prep, i = i) eta <- insert_refcat(eta, family = prep$family) phi <- get_dpar(prep, "phi", i = i) cats <- seq_len(prep$data$ncat) alpha <- dcategorical(cats, eta = eta) * phi out <- ddirichlet(prep$data$Y[i, ], alpha = alpha, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_dirichlet2 <- function(i, prep) { mu_dpars <- str_subset(names(prep$dpars), "^mu") mu <- cblapply(mu_dpars, get_dpar, prep = prep, i = i) out <- ddirichlet(prep$data$Y[i, ], alpha = mu, log = TRUE) log_lik_weight(out, i = i, prep = prep) } log_lik_cumulative <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (thres - mu) y <- prep$data$Y[i] if (y == 1) { out <- log(ilink(eta[, 1], prep$family$link)) } else if (y == nthres + 1) { out <- log(1 - ilink(eta[, y - 1], prep$family$link)) } else { out <- log( ilink(eta[, y], prep$family$link) - ilink(eta[, y - 1], prep$family$link) ) } log_lik_weight(out, i = i, prep = prep) } log_lik_sratio <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * 
(thres - mu) y <- prep$data$Y[i] q <- sapply(seq_len(min(y, nthres)), function(k) 1 - ilink(eta[, k], prep$family$link) ) if (y == 1) { out <- log(1 - q[, 1]) } else if (y == 2) { out <- log(1 - q[, 2]) + log(q[, 1]) } else if (y == nthres + 1) { out <- rowSums(log(q)) } else { out <- log(1 - q[, y]) + rowSums(log(q[, 1:(y - 1)])) } log_lik_weight(out, i = i, prep = prep) } log_lik_cratio <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (mu - thres) y <- prep$data$Y[i] q <- sapply(seq_len(min(y, nthres)), function(k) ilink(eta[, k], prep$family$link) ) if (y == 1) { out <- log(1 - q[, 1]) } else if (y == 2) { out <- log(1 - q[, 2]) + log(q[, 1]) } else if (y == nthres + 1) { out <- rowSums(log(q)) } else { out <- log(1 - q[, y]) + rowSums(log(q[, 1:(y - 1)])) } log_lik_weight(out, i = i, prep = prep) } log_lik_acat <- function(i, prep) { disc <- get_dpar(prep, "disc", i = i) mu <- get_dpar(prep, "mu", i = i) thres <- subset_thres(prep, i) nthres <- NCOL(thres) eta <- disc * (mu - thres) y <- prep$data$Y[i] if (prep$family$link == "logit") { # more efficient calculation q <- sapply(1:nthres, function(k) eta[, k]) p <- cbind(rep(0, nrow(eta)), q[, 1], matrix(0, nrow = nrow(eta), ncol = nthres - 1)) if (nthres > 1L) { p[, 3:(nthres + 1)] <- sapply(3:(nthres + 1), function(k) rowSums(q[, 1:(k - 1)])) } out <- p[, y] - log(rowSums(exp(p))) } else { q <- sapply(1:nthres, function(k) ilink(eta[, k], prep$family$link)) p <- cbind(apply(1 - q[, 1:nthres], 1, prod), matrix(0, nrow = nrow(eta), ncol = nthres)) if (nthres > 1L) { p[, 2:nthres] <- sapply(2:nthres, function(k) apply(as.matrix(q[, 1:(k - 1)]), 1, prod) * apply(as.matrix(1 - q[, k:nthres]), 1, prod)) } p[, nthres + 1] <- apply(q[, 1:nthres], 1, prod) out <- log(p[, y]) - log(apply(p, 1, sum)) } log_lik_weight(out, i = i, prep = prep) } log_lik_custom <- function(i, prep) { custom_family_method(prep$family, "log_lik")(i, prep) } log_lik_mixture <- function(i, prep) { families <- family_names(prep$family) theta <- get_theta(prep, i = i) out <- array(NA, dim = dim(theta)) for (j in seq_along(families)) { log_lik_fun <- paste0("log_lik_", families[j]) log_lik_fun <- get(log_lik_fun, asNamespace("brms")) tmp_draws <- pseudo_prep_for_mixture(prep, j) out[, j] <- exp(log(theta[, j]) + log_lik_fun(i, tmp_draws)) } if (isTRUE(prep[["pp_mixture"]])) { out <- log(out) - log(rowSums(out)) } else { out <- log(rowSums(out)) } log_lik_weight(out, i = i, prep = prep) } # ----------- log_lik helper-functions ----------- # compute (possibly censored) log_lik values # @param dist name of a distribution for which the functions # d (pdf) and p (cdf) are available # @param args additional arguments passed to pdf and cdf # @param prep a brmsprep object # @return vector of log_lik values log_lik_censor <- function(dist, args, i, prep) { pdf <- get(paste0("d", dist), mode = "function") cdf <- get(paste0("p", dist), mode = "function") y <- prep$data$Y[i] cens <- prep$data$cens[i] if (is.null(cens) || cens == 0) { x <- do_call(pdf, c(y, args, log = TRUE)) } else if (cens == 1) { x <- do_call(cdf, c(y, args, lower.tail = FALSE, log.p = TRUE)) } else if (cens == -1) { x <- do_call(cdf, c(y, args, log.p = TRUE)) } else if (cens == 2) { rcens <- prep$data$rcens[i] x <- log(do_call(cdf, c(rcens, args)) - do_call(cdf, c(y, args))) } x } # adjust log_lik in truncated models # @param x vector of log_lik values # @param cdf a cumulative distribution function # 
@param args arguments passed to cdf # @param i observation number # @param prep a brmsprep object # @return vector of log_lik values log_lik_truncate <- function(x, cdf, args, i, prep) { lb <- prep$data$lb[i] ub <- prep$data$ub[i] if (!(is.null(lb) && is.null(ub))) { if (is.null(lb)) lb <- -Inf if (is.null(ub)) ub <- Inf x - log(do_call(cdf, c(ub, args)) - do_call(cdf, c(lb, args))) } x } # weight log_lik values according to defined weights # @param x vector of log_lik values # @param i observation number # @param prep a brmsprep object # @return vector of log_lik values log_lik_weight <- function(x, i, prep) { weight <- prep$data$weights[i] if (!is.null(weight)) { x <- x * weight } x } # after some discussion with Aki Vehtari and Daniel Simpson, # I disallowed computation of log-likelihood values for some models # until pointwise solutions are implemented stop_no_pw <- function() { stop2("Cannot yet compute pointwise log-likelihood for this model ", "because the observations are not conditionally independent.") } # multiplicate factor for conditional student-t models # see http://proceedings.mlr.press/v33/shah14.pdf # note that brms parameterizes C instead of Cov(y) = df / (df - 2) * C # @param df degrees of freedom parameter # @param Cinv inverse of the full matrix # @param e vector of error terms, that is, y - mu student_t_cov_factor <- function(df, Cinv, e) { beta1 <- ulapply(seq_rows(Cinv), student_t_beta1_i, Cinv, e) (df + beta1) / (df + nrow(Cinv) - 1) } # beta1 in equation (6) of http://proceedings.mlr.press/v33/shah14.pdf # @param i observation index to exclude in the submatrix # @param Cinv inverse of the full matrix # @param e vector of error terms, that is, y - mu # @param vector of length one student_t_beta1_i <- function(i, Cinv, e) { sub_Cinv_i <- sub_inverse_symmetric(Cinv, i) t(e[-i]) %*% sub_Cinv_i %*% e[-i] } # efficient submatrix inverse for a symmetric matrix # see http://www.scielo.org.mx/pdf/cys/v20n2/1405-5546-cys-20-02-00251.pdf # @param Cinv inverse of the full matrix # @param i observation index to exclude in the submatrix # @return inverse of the submatrix after removing observation i sub_inverse_symmetric <- function(Cinv, i) { csub <- Cinv[i, -i] D <- outer(csub, csub) Cinv[-i, -i] - D / Cinv[i, i] } brms/R/brmsformula.R0000644000175000017500000021515214111751665014241 0ustar nileshnilesh#' Set up a model formula for use in \pkg{brms} #' #' Set up a model formula for use in the \pkg{brms} package #' allowing to define (potentially non-linear) additive multilevel #' models for all parameters of the assumed response distribution. #' #' @aliases bf #' #' @param formula An object of class \code{formula} #' (or one that can be coerced to that class): #' a symbolic description of the model to be fitted. #' The details of model specification are given in 'Details'. #' @param ... Additional \code{formula} objects to specify predictors of #' non-linear and distributional parameters. Formulas can either be named #' directly or contain names on their left-hand side. Alternatively, #' it is possible to fix parameters to certain values by passing #' numbers or character strings in which case arguments have to be named #' to provide the parameter names. See 'Details' for more information. #' @param flist Optional list of formulas, which are treated in the #' same way as formulas passed via the \code{...} argument. #' @param nl Logical; Indicates whether \code{formula} should be #' treated as specifying a non-linear model. 
By default, \code{formula} #' is treated as an ordinary linear model formula. #' @param loop Logical; Only used in non-linear models. #' Indicates if the computation of the non-linear formula should be #' done inside (\code{TRUE}) or outside (\code{FALSE}) a loop #' over observations. Defaults to \code{TRUE}. #' @param center Logical; Indicates if the population-level design #' matrix should be centered, which usually increases sampling efficiency. #' See the 'Details' section for more information. #' Defaults to \code{TRUE} for distributional parameters #' and to \code{FALSE} for non-linear parameters. #' @param cmc Logical; Indicates whether automatic cell-mean coding #' should be enabled when removing the intercept by adding \code{0} #' to the right-hand of model formulas. Defaults to \code{TRUE} to #' mirror the behavior of standard \R formula parsing. #' @param sparse Logical; indicates whether the population-level design matrices #' should be treated as sparse (defaults to \code{FALSE}). For design matrices #' with many zeros, this can considerably reduce required memory. Sampling #' speed is currently not improved or even slightly decreased. #' @param decomp Optional name of the decomposition used for the #' population-level design matrix. Defaults to \code{NULL} that is #' no decomposition. Other options currently available are #' \code{"QR"} for the QR decomposition that helps in fitting models #' with highly correlated predictors. #' @param family Same argument as in \code{\link{brm}}. #' If \code{family} is specified in \code{brmsformula}, it will #' overwrite the value specified in other functions. #' @param autocor An optional \code{formula} which contains #' autocorrelation terms as described in \code{\link{autocor-terms}} #' or alternatively a \code{\link{cor_brms}} object (deprecated). #' If \code{autocor} is specified in \code{brmsformula}, it will #' overwrite the value specified in other functions. #' @param unused An optional \code{formula} which contains variables #' that are unused in the model but should still be stored in the #' model's data frame. This can be useful, for example, #' if those variables are required for post-processing the model. #' #' @return An object of class \code{brmsformula}, which #' is essentially a \code{list} containing all model #' formulas as well as some additional information. #' #' @seealso \code{\link{mvbrmsformula}}, \code{\link{brmsformula-helpers}} #' #' @details #' #' \bold{General formula structure} #' #' The \code{formula} argument accepts formulas of the following syntax: #' #' \code{response | aterms ~ pterms + (gterms | group)} #' #' The \code{pterms} part contains effects that are assumed to be the same #' across observations. We call them 'population-level' or 'overall' effects, #' or (adopting frequentist vocabulary) 'fixed' effects. The optional #' \code{gterms} part may contain effects that are assumed to vary across #' grouping variables specified in \code{group}. We call them 'group-level' or #' 'varying' effects, or (adopting frequentist vocabulary) 'random' effects, #' although the latter name is misleading in a Bayesian context. For more #' details type \code{vignette("brms_overview")} and #' \code{vignette("brms_multilevel")}. #' #' \bold{Group-level terms} #' #' Multiple grouping factors each with multiple group-level effects are #' possible. (Of course we can also run models without any group-level #' effects.) 
Instead of \code{|} you may use \code{||} in grouping terms to #' prevent correlations from being modeled. Equivalently, the \code{cor} #' argument of the \code{\link{gr}} function can be used for this purpose, #' for example, \code{(1 + x || g)} is equivalent to #' \code{(1 + x | gr(g, cor = FALSE))}. #' #' It is also possible to model different group-level terms of the same #' grouping factor as correlated (even across different formulas, e.g., in #' non-linear models) by using \code{|<ID>|} instead of \code{|}. All #' group-level terms sharing the same ID will be modeled as correlated. If, #' for instance, one specifies the terms \code{(1+x|i|g)} and \code{(1+z|i|g)} #' somewhere in the formulas passed to \code{brmsformula}, correlations #' between the corresponding group-level effects will be estimated. In the #' above example, \code{i} is not a variable in the data but just a symbol to #' indicate correlations between multiple group-level terms. Equivalently, the #' \code{id} argument of the \code{\link{gr}} function can be used as well, #' for example, \code{(1 + x | gr(g, id = "i"))}. #' #' If levels of the grouping factor belong to different sub-populations, #' it may be reasonable to assume a different covariance matrix for each #' of the sub-populations. For instance, the variation within the #' treatment group and within the control group in a randomized control #' trial might differ. Suppose that \code{y} is the outcome, and #' \code{x} is the factor indicating the treatment and control group. #' Then, we could estimate different hyper-parameters of the varying #' effects (in this case a varying intercept) for treatment and control #' group via \code{y ~ x + (1 | gr(subject, by = x))}. #' #' You can specify multi-membership terms using the \code{\link{mm}} #' function. For instance, a multi-membership term with two members #' could be \code{(1 | mm(g1, g2))}, where \code{g1} and \code{g2} #' specify the first and second member, respectively. Moreover, #' if a covariate \code{x} varies across the levels of the grouping-factors #' \code{g1} and \code{g2}, we can save the respective covariate values #' in the variables \code{x1} and \code{x2} and then model the varying #' effect as \code{(1 + mmc(x1, x2) | mm(g1, g2))}. #' #' \bold{Special predictor terms} #' #' Flexible non-linear smooth terms can be modeled using the \code{\link{s}} #' and \code{\link{t2}} functions in the \code{pterms} part #' of the model formula. This allows fitting generalized additive mixed #' models (GAMMs) with \pkg{brms}. The implementation is similar to that #' used in the \pkg{gamm4} package. For more details on this model class #' see \code{\link[mgcv:gam]{gam}} and \code{\link[mgcv:gamm]{gamm}}. #' #' Gaussian process terms can be fitted using the \code{\link{gp}} #' function in the \code{pterms} part of the model formula. Similar to #' smooth terms, Gaussian processes can be used to model complex non-linear #' relationships, for instance temporal or spatial autocorrelation. #' However, they are computationally demanding and are thus not recommended #' for very large datasets, unless approximations are used. #' #' The \code{pterms} and \code{gterms} parts may contain four non-standard #' effect types, namely monotonic, measurement error, missing value, and #' category specific effects, which can be specified using terms of the #' form \code{mo(predictor)}, \code{me(predictor, sd_predictor)}, #' \code{mi(predictor)}, and \code{cs()}, respectively. 
#' Category specific effects can only be estimated in #' ordinal models and are explained in more detail in the package's #' main vignette (type \code{vignette("brms_overview")}). #' The other three effect types are explained in the following. #' #' A monotonic predictor must either be integer valued or an ordered factor, #' which is the first difference from an ordinary continuous predictor. #' More importantly, predictor categories (or integers) are not assumed to be #' equidistant with respect to their effect on the response variable. #' Instead, the distance between adjacent predictor categories (or integers) #' is estimated from the data and may vary across categories. #' This is realized by parameterizing as follows: #' One parameter takes care of the direction and size of the effect similar #' to an ordinary regression parameter, while an additional parameter vector #' estimates the normalized distances between consecutive predictor categories. #' A main application of monotonic effects is ordinal predictors that #' can this way be modeled without (falsely) treating them as continuous #' or as unordered categorical predictors. For more details and examples #' see \code{vignette("brms_monotonic")}. #' #' Quite often, predictors are measured and as such naturally contain #' measurement error. Although most researchers are well aware of this problem, #' measurement error in predictors is ignored in most #' regression analyses, possibly because only a few packages allow #' for modeling it. Notably, measurement error can be handled in #' structural equation models, but many more general regression models #' (such as those featured by \pkg{brms}) cannot be transferred #' to the SEM framework. In \pkg{brms}, effects of noise-free predictors #' can be modeled using the \code{me} (for 'measurement error') function. #' If, say, \code{y} is the response variable and #' \code{x} is a measured predictor with known measurement error #' \code{sdx}, we can simply include it on the right-hand side of the #' model formula via \code{y ~ me(x, sdx)}. #' This can easily be extended to more general formulas. #' If \code{x2} is another measured predictor with corresponding error #' \code{sdx2} and \code{z} is a predictor without error #' (e.g., an experimental setting), we can model all main effects #' and interactions of the three predictors in the well known manner: #' \code{y ~ me(x, sdx) * me(x2, sdx2) * z}. #' The \code{me} function is soft deprecated in favor of the more flexible #' and consistent \code{mi} function (see below). #' #' When a variable contains missing values, the corresponding rows will #' be excluded from the data by default (row-wise exclusion). However, #' quite often we want to keep these rows and instead estimate the missing values. #' There are two approaches for this: (a) Impute missing values before #' the model fitting, for instance via multiple imputation (see #' \code{\link{brm_multiple}} for a way to handle multiple imputed datasets). #' (b) Impute missing values on the fly during model fitting. The latter #' approach is explained in the following. Using a variable with missing #' values as a predictor requires two things. First, we need to specify that #' the predictor contains missing values that should be imputed. #' If, say, \code{y} is the primary response, \code{x} is a #' predictor with missing values and \code{z} is a predictor without missing values, #' we go for \code{y ~ mi(x) + z}. 
Second, we need to model \code{x} #' as an additional response with corresponding predictors and the #' addition term \code{mi()}. In our example, we could write #' \code{x | mi() ~ z}. Measurement error may be included via #' the \code{sdy} argument, say, \code{x | mi(sdy = se) ~ z}. #' See \code{\link{mi}} for examples with real data. #' #' #' \bold{Autocorrelation terms} #' #' Autocorrelation terms can be directly specified inside the \code{pterms} #' part as well. Details can be found in \code{\link{autocor-terms}}. #' #' \bold{Additional response information} #' #' Another special of the \pkg{brms} formula syntax is the optional #' \code{aterms} part, which may contain multiple terms of the form #' \code{fun()} separated by \code{+} each providing special #' information on the response variable. \code{fun} can be replaced with #' either \code{se}, \code{weights}, \code{subset}, \code{cens}, \code{trunc}, #' \code{trials}, \code{cat}, \code{dec}, \code{rate}, \code{vreal}, or #' \code{vint}. Their meanings are explained below. #' (see also \code{\link{addition-terms}}). #' #' For families \code{gaussian}, \code{student} and \code{skew_normal}, it is #' possible to specify standard errors of the observations, thus allowing #' to perform meta-analysis. Suppose that the variable \code{yi} contains #' the effect sizes from the studies and \code{sei} the corresponding #' standard errors. Then, fixed and random effects meta-analyses can #' be conducted using the formulas \code{yi | se(sei) ~ 1} and #' \code{yi | se(sei) ~ 1 + (1|study)}, respectively, where #' \code{study} is a variable uniquely identifying every study. #' If desired, meta-regression can be performed via #' \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1|study)} #' or \cr \code{yi | se(sei) ~ 1 + mod1 + mod2 + (1 + mod1 + mod2|study)}, #' where \code{mod1} and \code{mod2} represent moderator variables. #' By default, the standard errors replace the parameter \code{sigma}. #' To model \code{sigma} in addition to the known standard errors, #' set argument \code{sigma} in function \code{se} to \code{TRUE}, #' for instance, \code{yi | se(sei, sigma = TRUE) ~ 1}. #' #' For all families, weighted regression may be performed using #' \code{weights} in the \code{aterms} part. Internally, this is #' implemented by multiplying the log-posterior values of each #' observation by their corresponding weights. #' Suppose that variable \code{wei} contains the weights #' and that \code{yi} is the response variable. #' Then, formula \code{yi | weights(wei) ~ predictors} #' implements a weighted regression. #' #' For multivariate models, \code{subset} may be used in the \code{aterms} #' part, to use different subsets of the data in different univariate #' models. For instance, if \code{sub} is a logical variable and #' \code{y} is the response of one of the univariate models, we may #' write \code{y | subset(sub) ~ predictors} so that \code{y} is #' predicted only for those observations for which \code{sub} evaluates #' to \code{TRUE}. #' #' For log-linear models such as poisson models, \code{rate} may be used #' in the \code{aterms} part to specify the denominator of a response that #' is expressed as a rate. The numerator is given by the actual response #' variable and has a distribution according to the family as usual. Using #' \code{rate(denom)} is equivalent to adding \code{offset(log(denom))} to #' the linear predictor of the main parameter but the former is arguably #' more convenient and explicit. 
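#' For illustration (a minimal sketch; \code{deaths}, \code{exposure}, and
#' \code{age} are placeholder variable names, not taken from the package),
#' a Poisson rate model could be written as
#' \code{deaths | rate(exposure) ~ age}, which is equivalent to
#' \code{deaths ~ age + offset(log(exposure))}.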
#' #' With the exception of categorical and ordinal families, #' left, right, and interval censoring can be modeled through #' \code{y | cens(censored) ~ predictors}. The censoring variable #' (named \code{censored} in this example) should contain the values #' \code{'left'}, \code{'none'}, \code{'right'}, and \code{'interval'} #' (or equivalently \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate that #' the corresponding observation is left censored, not censored, right censored, #' or interval censored. For interval censored data, a second variable #' (let's call it \code{y2}) has to be passed to \code{cens}. In this case, #' the formula has the structure \code{y | cens(censored, y2) ~ predictors}. #' While the lower bounds are given in \code{y}, the upper bounds are given #' in \code{y2} for interval censored data. Intervals are assumed to be open #' on the left and closed on the right: \code{(y, y2]}. #' #' With the exception of categorical and ordinal families, #' the response distribution can be truncated using the \code{trunc} #' function in the addition part. If the response variable is truncated #' between, say, 0 and 100, we can specify this via #' \code{yi | trunc(lb = 0, ub = 100) ~ predictors}. #' Instead of numbers, variables in the data set can also be passed, allowing #' for varying truncation points across observations. Defining only one of #' the two arguments in \code{trunc} leads to one-sided truncation. #' #' For all continuous families, missing values in the responses can be imputed #' within Stan by using the addition term \code{mi}. This is mostly #' useful in combination with \code{mi} predictor terms as explained #' above under 'Special predictor terms'. #' #' For families \code{binomial} and \code{zero_inflated_binomial}, #' the addition part should contain a variable indicating the number of trials #' underlying each observation. In \code{lme4} syntax, we may write for instance #' \code{cbind(success, n - success)}, which is equivalent #' to \code{success | trials(n)} in \pkg{brms} syntax. If the number of trials #' is constant across all observations, say \code{10}, #' we may also write \code{success | trials(10)}. #' \bold{Please note that the \code{cbind()} syntax will not work #' in \pkg{brms} in the expected way because this syntax is reserved #' for other purposes.} #' #' For all ordinal families, \code{aterms} may contain a term #' \code{thres(number)} to specify the number of thresholds (e.g., #' \code{thres(6)}), which should be equal to the total number of response #' categories - 1. If not given, the number of thresholds is calculated from #' the data. If different threshold vectors should be used for different #' subsets of the data, the \code{gr} argument can be used to provide the #' grouping variable (e.g., \code{thres(6, gr = item)}, if \code{item} is the #' grouping variable). In this case, the number of thresholds can also be a #' variable in the data with different values per group. #' #' A deprecated quasi alias of \code{thres()} is \code{cat()} with which the #' total number of response categories (i.e., number of thresholds + 1) can be #' specified. #' #' In Wiener diffusion models (family \code{wiener}) the addition term #' \code{dec} is mandatory to specify the (vector of) binary decisions #' corresponding to the reaction times. Non-zero values will be treated #' as a response on the upper boundary of the diffusion process and zeros #' will be treated as a response on the lower boundary. 
Alternatively, #' the variable passed to \code{dec} might also be a character vector #' consisting of \code{'lower'} and \code{'upper'}. #' #' All families support the \code{index} addition term to uniquely identify #' each observation of the corresponding response variable. Currently, #' \code{index} is primarily useful in combination with the \code{subset} #' addition and \code{\link{mi}} terms. #' #' For custom families, it is possible to pass an arbitrary number of real and #' integer vectors via the addition terms \code{vreal} and \code{vint}, #' respectively. An example is provided in #' \code{vignette('brms_customfamilies')}. To pass multiple vectors of the #' same data type, provide them separated by commas inside a single #' \code{vreal} or \code{vint} statement. #' #' Multiple addition terms of different types may be specified at the same #' time using the \code{+} operator. For example, the formula #' \code{formula = yi | se(sei) + cens(censored) ~ 1} implies a censored #' meta-analytic model. #' #' The addition argument \code{disp} (short for dispersion) #' has been removed in version 2.0. You may instead use the #' distributional regression approach by specifying #' \code{sigma ~ 1 + offset(log(xdisp))} or #' \code{shape ~ 1 + offset(log(xdisp))}, where \code{xdisp} is #' the variable previously passed to \code{disp}. #' #' \bold{Parameterization of the population-level intercept} #' #' By default, the population-level intercept (if incorporated) is estimated #' separately and not as part of the population-level parameter vector \code{b}. As #' a result, priors on the intercept also have to be specified separately. #' Furthermore, to increase sampling efficiency, the population-level design #' matrix \code{X} is centered around its column means \code{X_means} if the #' intercept is incorporated. This leads to a temporary bias in the intercept #' equal to \code{<X_means, b>}, where \code{<,>} is the scalar product. The #' bias is corrected after fitting the model, but be aware that you are #' effectively defining a prior on the intercept of the centered design matrix, #' not on the real intercept. You can turn off this special handling of the #' intercept by setting argument \code{center} to \code{FALSE}. For more #' details on setting priors on population-level intercepts, see #' \code{\link{set_prior}}. #' #' This behavior can be avoided by using the reserved #' (and internally generated) variable \code{Intercept}. #' Instead of \code{y ~ x}, you may write #' \code{y ~ 0 + Intercept + x}. This way, priors can be #' defined on the real intercept, directly. In addition, #' the intercept is just treated as an ordinary population-level effect #' and thus priors defined on \code{b} will also apply to it. #' Note that this parameterization may be less efficient #' than the default parameterization discussed above. #' #' \bold{Formula syntax for non-linear models} #' #' In \pkg{brms}, it is possible to specify non-linear models #' of arbitrary complexity. #' The non-linear model can just be specified within the \code{formula} #' argument. Suppose that we want to predict the response \code{y} #' through the predictor \code{x}, where \code{x} is linked to \code{y} #' through \code{y = alpha - beta * lambda^x}, with parameters #' \code{alpha}, \code{beta}, and \code{lambda}. This is certainly a #' non-linear model, defined via #' \code{formula = y ~ alpha - beta * lambda^x} (addition arguments #' can be added in the same way as for ordinary formulas). 
#' To tell \pkg{brms} that this is a non-linear model, #' we set argument \code{nl} to \code{TRUE}. #' Now we have to specify a model for each of the non-linear parameters. #' Let's say we just want to estimate those three parameters #' with no further covariates or random effects. Then we can pass #' \code{alpha + beta + lambda ~ 1} or equivalently #' (and more flexible) \code{alpha ~ 1, beta ~ 1, lambda ~ 1} #' to the \code{...} argument. #' This can, of course, be extended. If we have another predictor \code{z} and #' observations nested within the grouping factor \code{g}, we may write for #' instance \code{alpha ~ 1, beta ~ 1 + z + (1|g), lambda ~ 1}. #' The formula syntax described above applies here as well. #' In this example, we are using \code{z} and \code{g} only for the #' prediction of \code{beta}, but we might also use them for the other #' non-linear parameters (provided that the resulting model is still #' scientifically reasonable). #' #' By default, non-linear covariates are treated as real vectors in Stan. #' However, if the data of the covariates is of type `integer` in \R (which #' can be enforced by the `as.integer` function), the Stan type will be #' changed to an integer array. That way, covariates can also be used #' for indexing purposes in Stan. #' #' Non-linear models may not be uniquely identified and / or show bad convergence. #' For this reason it is mandatory to specify priors on the non-linear parameters. #' For instructions on how to do that, see \code{\link{set_prior}}. #' For some examples of non-linear models, see \code{vignette("brms_nonlinear")}. #' #' \bold{Formula syntax for predicting distributional parameters} #' #' It is also possible to predict parameters of the response distribution such #' as the residual standard deviation \code{sigma} in gaussian models or the #' hurdle probability \code{hu} in hurdle models. The syntax closely resembles #' that of a non-linear parameter, for instance \code{sigma ~ x + s(z) + #' (1+x|g)}. For some examples of distributional models, see #' \code{vignette("brms_distreg")}. #' #' Parameter \code{mu} exists for every family and can be used as an #' alternative to specifying terms in \code{formula}. If both \code{mu} and #' \code{formula} are given, the right-hand side of \code{formula} is ignored. #' Accordingly, specifying terms on the right-hand side of both \code{formula} #' and \code{mu} at the same time is deprecated. In future versions, #' \code{formula} might be updated by \code{mu}. 
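#'
#' As a compact illustration of distributional regression (a minimal sketch;
#' \code{y}, \code{x}, and \code{g} are placeholder variable names), the
#' residual standard deviation of a gaussian model could be predicted via
#' \code{bf(y ~ x + (1|g), sigma ~ x + (1|g))}. The same syntax carries over
#' to other distributional parameters such as \code{hu} or \code{zi}.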
#' #' The following are #' distributional parameters of specific families (all other parameters are #' treated as non-linear parameters): \code{sigma} (residual standard #' deviation or scale of the \code{gaussian}, \code{student}, #' \code{skew_normal}, \code{lognormal}, \code{exgaussian}, and #' \code{asym_laplace} families); \code{shape} (shape parameter of the #' \code{Gamma}, \code{weibull}, \code{negbinomial}, and related zero-inflated #' / hurdle families); \code{nu} (degrees of freedom parameter of the #' \code{student} and \code{frechet} families); \code{phi} (precision #' parameter of the \code{beta} and \code{zero_inflated_beta} families); #' \code{kappa} (precision parameter of the \code{von_mises} family); #' \code{beta} (mean parameter of the exponential component of the #' \code{exgaussian} family); \code{quantile} (quantile parameter of the #' \code{asym_laplace} family); \code{zi} (zero-inflation probability); #' \code{hu} (hurdle probability); \code{zoi} (zero-one-inflation #' probability); \code{coi} (conditional one-inflation probability); #' \code{disc} (discrimination) for ordinal models; \code{bs}, \code{ndt}, and #' \code{bias} (boundary separation, non-decision time, and initial bias of #' the \code{wiener} diffusion model). By default, distributional parameters #' are modeled on the log scale if they can only be positive or on the logit #' scale if they can only be within the unit interval. #' #' Alternatively, one may fix distributional parameters to certain values. #' However, this is mainly useful when models become too #' complicated and otherwise have convergence issues. #' We thus suggest being generally careful when making use of this option. #' The \code{quantile} parameter of the \code{asym_laplace} distribution #' is a good example where it is useful. By fixing \code{quantile}, #' one can perform quantile regression for the specified quantile. #' For instance, \code{quantile = 0.25} allows predicting the 25\%-quantile. #' Furthermore, the \code{bias} parameter in drift-diffusion models #' is assumed to be \code{0.5} (i.e., no bias) in many applications. #' To achieve this, simply write \code{bias = 0.5}. #' Other possible applications are the Cauchy distribution as a #' special case of the Student-t distribution with #' \code{nu = 1}, or the geometric distribution as a special case of #' the negative binomial distribution with \code{shape = 1}. #' Furthermore, the parameter \code{disc} ('discrimination') in ordinal #' models is fixed to \code{1} by default and not estimated, #' but may be modeled as any other distributional parameter if desired #' (see examples). For reasons of identification, \code{'disc'} #' can only be positive, which is achieved by applying the log-link. #' #' In categorical models, distributional parameters do not have #' fixed names. Instead, they are named after the response categories #' (excluding the first one, which serves as the reference category), #' with the prefix \code{'mu'}. If, for instance, categories are named #' \code{cat1}, \code{cat2}, and \code{cat3}, the distributional parameters #' will be named \code{mucat2} and \code{mucat3}. #' #' Some distributional parameters currently supported by \code{brmsformula} #' have to be positive (a negative standard deviation or precision parameter #' does not make any sense) or are bounded between 0 and 1 (for zero-inflated / #' hurdle probabilities, quantiles, or the initial bias parameter of #' drift-diffusion models). 
#' However, linear predictors can be positive or negative, and thus the log link #' (for positive parameters) or logit link (for probability parameters) are used #' by default to ensure that distributional parameters are within their valid intervals. #' This implies that, by default, effects for such distributional parameters are #' estimated on the log / logit scale and one has to apply the inverse link #' function to get the effects on the original scale. #' Alternatively, it is possible to use the identity link to predict parameters #' on their original scale directly. However, this is much more likely to lead #' to problems in the model fitting if the parameter actually has a restricted range. #' #' See also \code{\link{brmsfamily}} for an overview of valid link functions. #' #' \bold{Formula syntax for mixture models} #' #' The specification of mixture models closely resembles that #' of non-mixture models. If not specified otherwise (see below), #' all mean parameters of the mixture components are predicted #' using the right-hand side of \code{formula}. All types of predictor #' terms allowed in non-mixture models are allowed in mixture models #' as well. #' #' Distributional parameters of mixture distributions have the same #' name as those of the corresponding ordinary distributions, but with #' a number at the end to indicate the mixture component. For instance, if #' you use family \code{mixture(gaussian, gaussian)}, the distributional #' parameters are \code{sigma1} and \code{sigma2}. #' Distributional parameters of the same class can be fixed to the same value. #' For the above example, we could write \code{sigma2 = "sigma1"} to make #' sure that both components have the same residual standard deviation, #' which is in turn estimated from the data. #' #' In addition, there are two types of special distributional parameters. #' The first are named \code{mu<ID>} and allow for modeling different #' predictors for the mean parameters of different mixture components. #' For instance, if you want to predict the mean of the first component #' using predictor \code{x} and the mean of the second component using #' predictor \code{z}, you can write \code{mu1 ~ x} as well as \code{mu2 ~ z}. #' The second are named \code{theta<ID>}, which constitute the mixing #' proportions. If the mixing proportions are fixed to certain values, #' they are internally normalized to form a probability vector. #' If one seeks to predict the mixing proportions, all but #' one of them has to be predicted, while the remaining one is used #' as the reference category to identify the model. The \code{softmax} #' function is applied to the linear predictor terms to form a #' probability vector. #' #' For more information on mixture models, see #' the documentation of \code{\link{mixture}}. #' #' \bold{Formula syntax for multivariate models} #' #' Multivariate models may be specified using \code{mvbind} notation #' or with the help of the \code{\link{mvbf}} function. #' Suppose that \code{y1} and \code{y2} are response variables #' and \code{x} is a predictor. Then \code{mvbind(y1, y2) ~ x} #' specifies a multivariate model. #' The effects of all terms specified on the RHS of the formula #' are assumed to vary across response variables. #' For instance, two parameters will be estimated for \code{x}, #' one for the effect on \code{y1} and another for the effect on \code{y2}. #' This is also true for group-level effects. 
When writing, for instance, #' \code{mvbind(y1, y2) ~ x + (1+x|g)}, group-level effects will be #' estimated separately for each response. To model these effects #' as correlated across responses, use the ID syntax (see above). #' For the present example, this would look as follows: #' \code{mvbind(y1, y2) ~ x + (1+x|2|g)}. Of course, you could also use #' any value other than \code{2} as ID. #' #' It is also possible to specify different formulas for different responses. #' If, for instance, \code{y1} should be predicted by \code{x} and \code{y2} #' should be predicted by \code{z}, we could write \code{mvbf(y1 ~ x, y2 ~ z)}. #' Alternatively, multiple \code{brmsformula} objects can be added to #' specify a joint multivariate model (see 'Examples'). #' #' @examples #' # multilevel model with smoothing terms #' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2)) #' #' # additionally predict 'sigma' #' brmsformula(y ~ x1*x2 + s(z) + (1+x1|1) + (1|g2), #' sigma ~ x1 + (1|g2)) #' #' # use the shorter alias 'bf' #' (formula1 <- brmsformula(y ~ x + (x|g))) #' (formula2 <- bf(y ~ x + (x|g))) #' # will be TRUE #' identical(formula1, formula2) #' #' # incorporate censoring #' bf(y | cens(censor_variable) ~ predictors) #' #' # define a simple non-linear model #' bf(y ~ a1 - a2^x, a1 + a2 ~ 1, nl = TRUE) #' #' # predict a1 and a2 differently #' bf(y ~ a1 - a2^x, a1 ~ 1, a2 ~ x + (x|g), nl = TRUE) #' #' # correlated group-level effects across parameters #' bf(y ~ a1 - a2^x, a1 ~ 1 + (1 |2| g), a2 ~ x + (x |2| g), nl = TRUE) #' # alternative but equivalent way to specify the above model #' bf(y ~ a1 - a2^x, a1 ~ 1 + (1 | gr(g, id = 2)), #' a2 ~ x + (x | gr(g, id = 2)), nl = TRUE) #' #' # define a multivariate model #' bf(mvbind(y1, y2) ~ x * z + (1|g)) #' #' # define a zero-inflated model #' # also predicting the zero-inflation part #' bf(y ~ x * z + (1+x|ID1|g), zi ~ x + (1|ID1|g)) #' #' # specify a predictor as monotonic #' bf(y ~ mo(x) + more_predictors) #' #' # for ordinal models only #' # specify a predictor as category specific #' bf(y ~ cs(x) + more_predictors) #' # add a category specific group-level intercept #' bf(y ~ cs(x) + (cs(1)|g)) #' # specify parameter 'disc' #' bf(y ~ person + item, disc ~ item) #' #' # specify variables containing measurement error #' bf(y ~ me(x, sdx)) #' #' # specify predictors on all parameters of the wiener diffusion model #' # the main formula models the drift rate 'delta' #' bf(rt | dec(decision) ~ x, bs ~ x, ndt ~ x, bias ~ x) #' #' # fix the bias parameter to 0.5 #' bf(rt | dec(decision) ~ x, bias = 0.5) #' #' # specify different predictors for different mixture components #' mix <- mixture(gaussian, gaussian) #' bf(y ~ 1, mu1 ~ x, mu2 ~ z, family = mix) #' #' # fix both residual standard deviations to the same value #' bf(y ~ x, sigma2 = "sigma1", family = mix) #' #' # use the '+' operator to specify models #' bf(y ~ 1) + #' nlf(sigma ~ a * exp(b * x), a ~ x) + #' lf(b ~ z + (1|g), dpar = "sigma") + #' gaussian() #' #' # specify a multivariate model using the '+' operator #' bf(y1 ~ x + (1|g)) + #' gaussian() + cor_ar(~1|g) + #' bf(y2 ~ z) + poisson() #' #' # specify correlated residuals of a gaussian and a poisson model #' form1 <- bf(y1 ~ 1 + x + (1|c|obs), sigma = 1) + gaussian() #' form2 <- bf(y2 ~ 1 + x + (1|c|obs)) + poisson() #' #' # model missing values in predictors #' bf(bmi ~ age * mi(chl)) + #' bf(chl | mi() ~ age) + #' set_rescor(FALSE) #' #' # model sigma as a function of the mean #' bf(y ~ eta, nl = TRUE) + #' lf(eta ~ 1 + x) + #' nlf(sigma ~ 
tau * sqrt(eta)) + #' lf(tau ~ 1) #' #' @export brmsformula <- function(formula, ..., flist = NULL, family = NULL, autocor = NULL, nl = NULL, loop = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL, unused = NULL) { if (is.brmsformula(formula)) { out <- formula } else { out <- list(formula = as_formula(formula)) class(out) <- "brmsformula" } # parse and validate dots arguments dots <- c(out$pforms, out$pfix, list(...), flist) dots <- lapply(dots, function(x) if (is.list(x)) x else list(x)) dots <- unlist(dots, recursive = FALSE) forms <- list() for (i in seq_along(dots)) { c(forms) <- validate_par_formula(dots[[i]], par = names(dots)[i]) } is_dupl_pars <- duplicated(names(forms), fromLast = TRUE) if (any(is_dupl_pars)) { dupl_pars <- collapse_comma(names(forms)[is_dupl_pars]) message("Replacing initial definitions of parameters ", dupl_pars) forms[is_dupl_pars] <- NULL } not_form <- ulapply(forms, function(x) !is.formula(x)) fix <- forms[not_form] forms[names(fix)] <- NULL out$pforms <- forms # validate fixed distributional parameters fix_theta <- fix[dpar_class(names(fix)) %in% "theta"] if (length(fix_theta)) { # normalize mixing proportions sum_theta <- sum(unlist(fix_theta)) fix_theta <- lapply(fix_theta, "/", sum_theta) fix[names(fix_theta)] <- fix_theta } out$pfix <- fix for (dp in names(out$pfix)) { if (is.character(out$pfix[[dp]])) { if (identical(dp, out$pfix[[dp]])) { stop2("Equating '", dp, "' with itself is not meaningful.") } ap_class <- dpar_class(dp) if (ap_class == "mu") { stop2("Equating parameters of class 'mu' is not allowed.") } if (!identical(ap_class, dpar_class(out$pfix[[dp]]))) { stop2("Can only equate parameters of the same class.") } if (out$pfix[[dp]] %in% names(out$pfix)) { stop2("Cannot use fixed parameters on ", "the right-hand side of an equation.") } if (out$pfix[[dp]] %in% names(out$pforms)) { stop2("Cannot use predicted parameters on ", "the right-hand side of an equation.") } } } if (!is.null(nl)) { attr(out$formula, "nl") <- as_one_logical(nl) } else if (!is.null(out[["nl"]])) { # for backwards compatibility with brms <= 1.8.0 attr(out$formula, "nl") <- out[["nl"]] out[["nl"]] <- NULL } if (is.null(attr(out$formula, "nl"))) { attr(out$formula, "nl") <- FALSE } if (!is.null(loop)) { attr(out$formula, "loop") <- as_one_logical(loop) } if (is.null(attr(out$formula, "loop"))) { attr(out$formula, "loop") <- TRUE } if (!is.null(center)) { attr(out$formula, "center") <- as_one_logical(center) } if (!is.null(cmc)) { attr(out$formula, "cmc") <- as_one_logical(cmc) } if (!is.null(sparse)) { attr(out$formula, "sparse") <- as_one_logical(sparse) } if (!is.null(decomp)) { attr(out$formula, "decomp") <- match.arg(decomp, decomp_opts()) } if (!is.null(unused)) { attr(out$formula, "unused") <- as.formula(unused) } if (!is.null(autocor)) { attr(out$formula, "autocor") <- validate_autocor(autocor) } else if (!is.null(out$autocor)) { # for backwards compatibility with brms <= 2.11.0 attr(out$formula, "autocor") <- validate_autocor(out$autocor) out$autocor <- NULL } if (!is.null(family)) { out$family <- validate_family(family) } if (!is.null(lhs(formula))) { out$resp <- terms_resp(formula) } # add default values for unspecified elements defs <- list(pforms = list(), pfix = list(), family = NULL, resp = NULL) defs <- defs[setdiff(names(defs), names(rmNULL(out, FALSE)))] out[names(defs)] <- defs class(out) <- c("brmsformula", "bform") split_bf(out) } # alias of brmsformula #' @export bf <- function(formula, ..., flist = NULL, family = NULL, autocor = NULL, nl = 
NULL, loop = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL) { brmsformula( formula, ..., flist = flist, family = family, autocor = autocor, nl = nl, loop = loop, center = center, cmc = cmc, sparse = sparse, decomp = decomp ) } #' Linear and Non-linear formulas in \pkg{brms} #' #' Helper functions to specify linear and non-linear #' formulas for use with \code{\link[brms:brmsformula]{brmsformula}}. #' #' @name brmsformula-helpers #' @aliases bf-helpers nlf lf set_nl set_rescor #' #' @param formula Non-linear formula for a distributional parameter. #' The name of the distributional parameter can either be specified #' on the left-hand side of \code{formula} or via argument \code{dpar}. #' @param dpar Optional character string specifying the distributional #' parameter to which the formulas passed via \code{...} and #' \code{flist} belong. #' @param resp Optional character string specifying the response #' variable to which the formulas passed via \code{...} and #' \code{flist} belong. Only relevant in multivariate models. #' @param autocor A one-sided formula containing autocorrelation #' terms. All non-autocorrelation terms in \code{autocor} will #' be silently ignored. #' @param rescor Logical; Indicates if residual correlation between #' the response variables should be modeled. Currently this is only #' possible in multivariate \code{gaussian} and \code{student} models. #' Only relevant in multivariate models. #' @param mecor Logical; Indicates if correlations between latent variables #' defined by \code{\link{me}} terms should be modeled. Defaults to \code{TRUE}. #' @inheritParams brmsformula #' #' @return For \code{lf} and \code{nlf} a \code{list} that can be #' passed to \code{\link[brms:brmsformula]{brmsformula}} or added #' to an existing \code{brmsformula} or \code{mvbrmsformula} object. #' For \code{set_nl} and \code{set_rescor} a logical value that can be #' added to an existing \code{brmsformula} or \code{mvbrmsformula} object. #' #' @seealso \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} #' #' @examples #' # add more formulas to the model #' bf(y ~ 1) + #' nlf(sigma ~ a * exp(b * x)) + #' lf(a ~ x, b ~ z + (1|g)) + #' gaussian() #' #' # specify 'nl' later on #' bf(y ~ a * inv_logit(x * b)) + #' lf(a + b ~ z) + #' set_nl(TRUE) #' #' # specify a multivariate model #' bf(y1 ~ x + (1|g)) + #' bf(y2 ~ z) + #' set_rescor(TRUE) #' #' # add autocorrelation terms #' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(W)) NULL #' @rdname brmsformula-helpers #' @export nlf <- function(formula, ..., flist = NULL, dpar = NULL, resp = NULL, loop = NULL) { formula <- as_formula(formula) if (is.null(lhs(formula))) { stop2("Argument 'formula' must be two-sided.") } if (length(c(list(...), flist))) { warning2( "Arguments '...' and 'flist' in nlf() will be reworked ", "at some point. Please avoid using them if possible." 
) } warn_dpar(dpar) if (!is.null(resp)) { resp <- as_one_character(resp) } if (!is.null(loop)) { attr(formula, "loop") <- as_one_logical(loop) } if (is.null(attr(formula, "loop"))) { attr(formula, "loop") <- TRUE } out <- c( list(structure(formula, nl = TRUE)), lf(..., flist = flist) ) structure(out, resp = resp) } #' @rdname brmsformula-helpers #' @export lf <- function(..., flist = NULL, dpar = NULL, resp = NULL, center = NULL, cmc = NULL, sparse = NULL, decomp = NULL) { out <- c(list(...), flist) warn_dpar(dpar) if (!is.null(resp)) { resp <- as_one_character(resp) } cmc <- if (!is.null(cmc)) as_one_logical(cmc) center <- if (!is.null(center)) as_one_logical(center) decomp <- if (!is.null(decomp)) match.arg(decomp, decomp_opts()) for (i in seq_along(out)) { if (!is.null(cmc)) { attr(out[[i]], "cmc") <- cmc } if (!is.null(center)) { attr(out[[i]], "center") <- center } if (!is.null(sparse)) { attr(out[[i]], "sparse") <- sparse } if (!is.null(decomp)) { attr(out[[i]], "decomp") <- decomp } } structure(out, resp = resp) } #' @rdname brmsformula-helpers #' @export acformula <- function(autocor, resp = NULL) { autocor <- terms_ac(as.formula(autocor)) if (!is.formula(autocor)) { stop2("'autocor' must contain at least one autocorrelation term.") } if (!is.null(resp)) { resp <- as_one_character(resp) } structure(autocor, resp = resp, class = c("acformula", "formula")) } #' @rdname brmsformula-helpers #' @export set_nl <- function(nl = TRUE, dpar = NULL, resp = NULL) { nl <- as_one_logical(nl) if (!is.null(dpar)) { dpar <- as_one_character(dpar) } if (!is.null(resp)) { resp <- as_one_character(resp) } structure(nl, dpar = dpar, resp = resp, class = "setnl") } #' Set up a multivariate model formula for use in \pkg{brms} #' #' Set up a multivariate model formula for use in the \pkg{brms} package #' allowing to define (potentially non-linear) additive multilevel #' models for all parameters of the assumed response distributions. #' #' @aliases mvbf #' #' @param ... Objects of class \code{formula} or \code{brmsformula}, #' each specifying a univariate model. See \code{\link{brmsformula}} #' for details on how to specify univariate models. #' @param flist Optional list of formulas, which are treated in the #' same way as formulas passed via the \code{...} argument. #' @param rescor Logical; Indicates if residual correlation between #' the response variables should be modeled. Currently, this is only #' possible in multivariate \code{gaussian} and \code{student} models. #' If \code{NULL} (the default), \code{rescor} is internally set to #' \code{TRUE} when possible. #' #' @return An object of class \code{mvbrmsformula}, which #' is essentially a \code{list} containing all model formulas #' as well as some additional information for multivariate models. #' #' @details See \code{vignette("brms_multivariate")} for a case study. 
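#' As a minimal illustration (response and predictor names are placeholders;
#' gaussian response families are assumed, since residual correlations are
#' currently only supported for gaussian and student models),
#' \code{mvbf(bf(y1 ~ x), bf(y2 ~ x), rescor = TRUE)} combines two univariate
#' formulas and additionally requests modeling of residual correlations
#' between the responses.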
#' #' @seealso \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} #' #' @examples #' bf1 <- bf(y1 ~ x + (1|g)) #' bf2 <- bf(y2 ~ s(z)) #' mvbf(bf1, bf2) #' #' @export mvbrmsformula <- function(..., flist = NULL, rescor = NULL) { dots <- c(list(...), flist) if (!length(dots)) { stop2("No objects passed to 'mvbrmsformula'.") } forms <- list() for (i in seq_along(dots)) { if (is.mvbrmsformula(dots[[i]])) { forms <- c(forms, dots[[i]]$forms) if (is.null(rescor)) { rescor <- dots[[i]]$rescor } } else { forms <- c(forms, list(bf(dots[[i]]))) } } if (!is.null(rescor)) { rescor <- as_one_logical(rescor) } responses <- ulapply(forms, "[[", "resp") if (any(duplicated(responses))) { stop2("Cannot use the same response variable twice in the same model.") } names(forms) <- responses structure( nlist(forms, responses, rescor), class = c("mvbrmsformula", "bform") ) } #' @export mvbf <- function(..., flist = NULL, rescor = NULL) { mvbrmsformula(..., flist = flist, rescor = rescor) } # build a mvbrmsformula object based on a brmsformula object # which uses mvbind on the left-hand side to specify MV models split_bf <- function(x) { stopifnot(is.brmsformula(x)) resp <- terms_resp(x$formula, check_names = FALSE) str_adform <- formula2str(x$formula) str_adform <- get_matches("\\|[^~]*(?=~)", str_adform, perl = TRUE) if (length(resp) > 1L) { # mvbind syntax used to specify MV model flist <- named_list(resp) for (i in seq_along(resp)) { flist[[i]] <- x str_lhs <- paste0(resp[[i]], str_adform) flist[[i]]$formula[[2]] <- parse(text = str_lhs)[[1]] flist[[i]]$resp <- resp[[i]] } x <- mvbf(flist = flist) } x } #' Bind response variables in multivariate models #' #' Can be used to specify a multivariate \pkg{brms} model within a single #' formula. Outside of \code{\link{brmsformula}}, it just behaves like #' \code{\link{cbind}}. #' #' @param ... Same as in \code{\link{cbind}} #' #' @seealso \code{\link{brmsformula}}, \code{\link{mvbrmsformula}} #' #' @examples #' bf(mvbind(y1, y2) ~ x) #' #' @export mvbind <- function(...) { cbind(...) 
} #' @rdname brmsformula-helpers #' @export set_rescor <- function(rescor = TRUE) { structure(as_one_logical(rescor), class = "setrescor") } allow_rescor <- function(x) { # indicate if estimating 'rescor' is allowed for this model if (!(is.mvbrmsformula(x) || is.mvbrmsterms(x))) { return(FALSE) } parts <- if (is.mvbrmsformula(x)) x$forms else x$terms families <- lapply(parts, "[[", "family") has_rescor <- ulapply(families, has_rescor) is_mixture <- ulapply(families, is.mixfamily) family_names <- ulapply(families, "[[", "family") all(has_rescor) && !any(is_mixture) && length(unique(family_names)) == 1L } #' @rdname brmsformula-helpers #' @export set_mecor <- function(mecor = TRUE) { structure(as_one_logical(mecor), class = "setmecor") } #' @export "+.bform" <- function(e1, e2) { if (is.brmsformula(e1)) { out <- plus_brmsformula(e1, e2) } else if (is.mvbrmsformula(e1)) { out <- plus_mvbrmsformula(e1, e2) } else { stop2("Method '+.bform' not implemented for ", class(e1), " objects.") } out } # internal helper function of '+.bform' plus_brmsformula <- function(e1, e2) { if (is.function(e2)) { e2 <- try(e2(), silent = TRUE) if (!is.family(e2)) { stop2("Don't know how to handle non-family functions.") } } if (is.family(e2)) { e1 <- bf(e1, family = e2) } else if (is.cor_brms(e2) || inherits(e2, "acformula")) { e1 <- bf(e1, autocor = e2) } else if (inherits(e2, "setnl")) { dpar <- attr(e2, "dpar") if (is.null(dpar)) { e1 <- bf(e1, nl = e2) } else { if (is.null(e1$pforms[[dpar]])) { stop2("Parameter '", dpar, "' has no formula.") } attr(e1$pforms[[dpar]], "nl") <- e2 e1 <- bf(e1) } } else if (inherits(e2, "setmecor")) { e1$mecor <- e2[1] } else if (is.brmsformula(e2)) { e1 <- mvbf(e1, e2) } else if (inherits(e2, "setrescor")) { stop2("Setting 'rescor' is only possible in multivariate models.") } else if (is.ac_term(e2)) { stop2("Autocorrelation terms can only be specified on the right-hand ", "side of a formula, not added to a 'brmsformula' object.") } else if (!is.null(e2)) { e1 <- bf(e1, e2) } e1 } # internal helper function of '+.bform' plus_mvbrmsformula <- function(e1, e2) { if (is.function(e2)) { e2 <- try(e2(), silent = TRUE) if (!is.family(e2)) { stop2("Don't know how to handle non-family functions.") } } if (is.family(e2) || is.cor_brms(e2)) { e1$forms <- lapply(e1$forms, "+", e2) } else if (inherits(e2, "setrescor")) { e1$rescor <- e2[1] } else if (inherits(e2, "setmecor")) { e1$mecor <- e2[1] } else if (is.brmsformula(e2)) { e1 <- mvbf(e1, e2) } else if (is.ac_term(e2)) { stop2("Autocorrelation terms can only be specified on the right-hand ", "side of a formula, not added to a 'mvbrmsformula' object.") } else if (!is.null(e2)) { resp <- attr(e2, "resp", TRUE) if (is.null(resp)) { stop2( "Don't know how to add a ", class(e2), " object ", "without the response variable name. ", "See help('brmsformula-helpers') for more details." ) } if (!isTRUE(resp %in% e1$responses)) { stop2("'resp' should be one of ", collapse_comma(e1$responses), ".") } e1$forms[[resp]] <- e1$forms[[resp]] + e2 } e1 } # extract the 'nl' attribute from a brmsformula object # @param x object to extract 'nl' from # @param dpar optional name of a distributional parameter # for which 'nl' should be extracted # @param resp: optional name of a response variable # for which 'nl' should be extracted # @param aol: (as one logical) apply isTRUE to the result? 
get_nl <- function(x, dpar = NULL, resp = NULL, aol = TRUE) { if (is.mvbrmsformula(x)) { resp <- as_one_character(resp) x <- x$forms[[resp]] } if (is.brmsformula(x)) { if (is.null(dpar)) { x <- x$formula } else { dpar <- as_one_character(dpar) x <- x$pforms[[dpar]] } } nl <- attr(x, "nl", TRUE) if (aol) { nl <- isTRUE(nl) } nl } # available options for the 'decomp' argument decomp_opts <- function() { c("none", "QR") } # validate a formula of an additional parameter # @param formula an formula object # @param par optional name of the parameter; if not specified # the parameter name will be inferred from the formula # @param rsv_pars optional character vector of reserved parameter names # @return a named list of length one containing the formula validate_par_formula <- function(formula, par = NULL, rsv_pars = NULL) { stopifnot(length(par) <= 1L) try_formula <- try(as_formula(formula), silent = TRUE) if (is(try_formula, "try-error")) { if (length(formula) != 1L) { stop2("Expecting a single value when fixing parameter '", par, "'.") } scalar <- SW(as.numeric(formula)) if (!is.na(scalar)) { formula <- scalar } else { formula <- as.character(formula) } out <- named_list(par, formula) } else { formula <- try_formula if (!is.null(lhs(formula))) { resp_pars <- all.vars(formula[[2]]) out <- named_list(resp_pars, list(formula)) for (i in seq_along(out)) { out[[i]][[2]] <- eval2(paste("quote(", resp_pars[i], ")")) } } else { if (!isTRUE(nzchar(par))) { stop2("Additional formulas must be named.") } formula <- formula(paste(par, formula2str(formula))) out <- named_list(par, list(formula)) } } pars <- names(out) if (any(grepl("\\.|_", pars))) { stop2("Parameter names should not contain dots or underscores.") } inv_pars <- intersect(pars, rsv_pars) if (length(inv_pars)) { stop2("The following parameter names are reserved", "for this model:\n", collapse_comma(inv_pars)) } out } # validate formulas dedicated to response variables # @param x coerced to a formula object # @param empty_ok is an empty left-hand-side ok? # @return a formula of the form ~ 1 validate_resp_formula <- function(x, empty_ok = TRUE) { out <- lhs(as_formula(x)) if (is.null(out)) { if (empty_ok) { out <- ~ 1 } else { str_x <- formula2str(x, space = "trim") stop2("Response variable is missing in formula ", str_x) } } out <- gsub("\\|+[^~]*~", "~", formula2str(out)) out <- try(formula(out), silent = TRUE) if (is(out, "try-error")) { str_x <- formula2str(x, space = "trim") stop2("Incorrect use of '|' on the left-hand side of ", str_x) } environment(out) <- environment(x) out } # incorporate additional arguments into the model formula validate_formula <- function(formula, ...) { UseMethod("validate_formula") } #' @export validate_formula.default <- function(formula, ...) { validate_formula(bf(formula), ...) } # incorporate additional arguments into the model formula # @param formula object of class 'formula' of 'brmsformula' # @param data optional data.frame to validate data related arguments # @param family optional 'family' object # @param autocor (deprecated) optional 'cor_brms' object # @param threshold (deprecated) threshold type for ordinal models # @param cov_ranef (deprecated) named list of group covariance matrices # @return a brmsformula object compatible with the current version of brms #' @export validate_formula.brmsformula <- function( formula, family = gaussian(), autocor = NULL, data = NULL, threshold = NULL, sparse = NULL, cov_ranef = NULL, ... 
) { out <- bf(formula) if (is.null(out$family) && !is.null(family)) { out$family <- validate_family(family) } # allow the '.' symbol in the formulas out$formula <- expand_dot_formula(out$formula, data) for (i in seq_along(out$pforms)) { out$pforms[[i]] <- expand_dot_formula(out$pforms[[i]], data) } # allow 'me' terms to be correlated out$mecor <- default_mecor(out$mecor) if (has_cat(out) && !is.null(data)) { # for easy access of response categories # allow to update 'cats' with new data out$family$cats <- extract_cat_names(out, data) } if (is_ordinal(out$family)) { # thresholds and category names are data dependent try_terms <- try(stats::terms(out$formula), silent = TRUE) intercept <- attr(try_terms, "intercept", TRUE) if (!is(try_terms, "try-error") && isTRUE(intercept == 0)) { stop2("Cannot remove the intercept in an ordinal model.") } if (!is.null(data)) { # for easy access of thresholds and response categories (#838) # allow to update 'cats' and 'thres' with new data out$family$thres <- extract_thres_names(out, data) out$family$cats <- extract_cat_names(out, data) } if (is.mixfamily(out$family)) { # every mixture family needs to know about response categories for (i in seq_along(out$family$mix)) { out$family$mix[[i]]$thres <- out$family$thres } } } conv_cats_dpars <- conv_cats_dpars(out$family) if (conv_cats_dpars && !is.null(data)) { # allow to update 'dpars' with new data # define distributional parameters based on response categories if (length(out$family$cats) < 2L) { stop2("At least 2 response categories are required.") } if (is.null(out$family$refcat)) { # the first level serves as the reference category out$family$refcat <- out$family$cats[1] } if (isNA(out$family$refcat)) { # implies predicting all categories predcats <- out$family$cats } else { if (!out$family$refcat %in% out$family$cats) { stop2("The reference response category must be one of ", collapse_comma(out$family$cats), ".") } predcats <- setdiff(out$family$cats, out$family$refcat) } mu_dpars <- make_stan_names(paste0("mu", predcats)) if (any(duplicated(mu_dpars))) { stop2("Invalid response category names. Please avoid ", "using any special characters in the names.") } old_mu_dpars <- str_subset(out$family$dpars, "^mu") out$family$dpars <- setdiff(out$family$dpars, old_mu_dpars) out$family$dpars <- union(mu_dpars, out$family$dpars) } # incorporate deprecated arguments require_threshold <- is_ordinal(out$family) && is.null(out$family$threshold) if (require_threshold && !is.null(threshold)) { # slot 'threshold' is deprecated as of brms 1.7.0 out$family <- validate_family(out$family, threshold = threshold) } if (!is.null(sparse)) { # a global 'sparse' argument is deprecated as of brms 2.8.3 warning2( "Argument 'sparse' should be specified within the ", "'formula' argument. See ?brmsformula for help." ) sparse <- as_one_logical(sparse) if (is.null(attr(out$formula, "sparse"))) { attr(out$formula, "sparse") <- sparse } for (i in seq_along(out$pforms)) { if (is.null(attr(out$pforms[[i]], "sparse"))) { attr(out$pforms[[i]], "sparse") <- sparse } } } if (is.null(attr(out$formula, "autocor")) && !is.null(autocor)) { # 'autocor' interface has been changed in brms 2.11.1 warning2( "Argument 'autocor' should be specified within the ", "'formula' argument. See ?brmsformula for help." 
) # store 'autocor' as an attribute to carry it around more easily attr(out$formula, "autocor") <- validate_autocor(autocor) } if (!is.null(cov_ranef)) { # 'cov_ranef' is deprecated as of brms 2.12.5 out$cov_ranef <- validate_cov_ranef(cov_ranef) } bf(out) } # incorporate additional arguments into MV model formulas # allow passing lists of families or autocors #' @export validate_formula.mvbrmsformula <- function( formula, family = NULL, autocor = NULL, cov_ranef = NULL, ... ) { nresp <- length(formula$forms) if (!is(family, "list")) { family <- replicate(nresp, family, simplify = FALSE) } else if (length(family) != nresp) { stop2("If 'family' is a list, it has to be of the same ", "length as the number of response variables.") } if (!is(autocor, "list")) { autocor <- replicate(nresp, autocor, simplify = FALSE) } else if (length(autocor) != nresp) { stop2("If 'autocor' is a list, it has to be of the same ", "length as the number of response variables.") } for (i in seq_len(nresp)) { formula$forms[[i]] <- validate_formula( formula$forms[[i]], family = family[[i]], autocor = autocor[[i]], ... ) } if (length(formula$forms) < 2L) { stop2("Multivariate models require at least two responses.") } allow_rescor <- allow_rescor(formula) if (is.null(formula$rescor)) { # with 'mi' terms we usually don't want rescor to be estimated miforms <- ulapply(formula$forms, function(f) terms_ad(f$formula, f$family, FALSE)[["mi"]] ) formula$rescor <- allow_rescor && !length(miforms) message("Setting 'rescor' to ", formula$rescor, " by default for this model") if (formula$rescor) { warning2( "In the future, 'rescor' will be set to FALSE by default for ", "all models. It is thus recommended to explicitely set ", "'rescor' via 'set_rescor' instead of using the default." ) } } formula$rescor <- as_one_logical(formula$rescor) if (formula$rescor) { if (!allow_rescor) { stop2("Currently, estimating 'rescor' is only possible ", "in multivariate gaussian or student models.") } } # handle default of correlations between 'me' terms formula$mecor <- default_mecor(formula$mecor) for (i in seq_along(formula$forms)) { formula$forms[[i]]$mecor <- formula$mecor } # incorporate deprecated arguments if (!is.null(cov_ranef)) { # 'cov_ranef' is deprecated as of brms 2.12.5 formula$cov_ranef <- validate_cov_ranef(cov_ranef) } formula } # update a brmsformula and / or its attributes # @param brmsformula object # @param formula.: formula to update 'object' # @param mode supports the following options: # "update": apply update.formula # "replace": replace old formula # "keep": keep old formula # attributes are always updated # @param ... currently unused # @return a brmsformula object #' @export update.brmsformula <- function(object, formula., mode = c("update", "replace", "keep"), ...) { mode <- match.arg(mode) object <- bf(object) up_nl <- get_nl(formula., aol = FALSE) if (is.null(up_nl)) { up_nl <- get_nl(object) } # already use up_nl here to avoid ordinary parsing of NL formulas formula. <- bf(formula., nl = up_nl) up_family <- formula.[["family"]] if (is.null(up_family)) { up_family <- object[["family"]] } up_autocor <- attr(formula.$formula, "autocor") if (is.null(up_autocor)) { up_autocor <- attr(object$formula, "autocor") } old_form <- object$formula up_form <- formula.$formula if (mode == "update") { new_form <- update(old_form, up_form, ...) 
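    # The three modes differ as follows (illustrative sketch, not executed;
    # formulas chosen for illustration only). Suppose the old formula is
    # y ~ x + z and 'formula.' is ~ . - z:
    #   mode = "update"  -> y ~ x      (stats::update.formula is applied)
    #   mode = "replace" -> ~ . - z    (the new formula is used as is)
    #   mode = "keep"    -> y ~ x + z  (the old formula is kept; only the
    #                                   attributes and 'pforms' are merged)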
} else if (mode == "replace") { new_form <- up_form } else if (mode == "keep") { new_form <- old_form } flist <- c(object$pforms, object$pfix, formula.$pforms, formula.$pfix) bf(new_form, flist = flist, family = up_family, autocor = up_autocor, nl = up_nl) } #' @export update.mvbrmsformula <- function(object, formula., ...) { # temporary until proper updating is implemented if (!missing(formula.)) { stop2("Updating formulas of multivariate models is not yet possible.") } object } #' Update Formula Addition Terms #' #' Update additions terms used in formulas of \pkg{brms}. See #' \code{\link{addition-terms}} for details. #' #' @param formula Two-sided formula to be updated. #' @param adform One-sided formula containing addition terms to update #' \code{formula} with. #' @param action Indicates what should happen to the existing addition terms in #' \code{formula}. If \code{"update"} (the default), old addition terms that #' have no corresponding term in \code{adform} will be kept. If #' \code{"replace"}, all old addition terms will be removed. #' #' @return An object of class \code{formula}. #' #' @examples #' form <- y | trials(size) ~ x #' update_adterms(form, ~ trials(10)) #' update_adterms(form, ~ weights(w)) #' update_adterms(form, ~ weights(w), action = "replace") #' update_adterms(y ~ x, ~ trials(10)) #' #' @export update_adterms <- function(formula, adform, action = c("update", "replace")) { formula <- as_formula(formula) adform <- as_formula(adform) action <- match.arg(action) if (is.null(lhs(formula))) { stop2("Can't update a ond-sided formula.") } str_formula <- formula2str(formula) old_ad <- get_matches("(?<=\\|)[^~]*(?=~)", str_formula, perl = TRUE) new_ad_terms <- attr(terms(adform), "term.labels") if (action == "update" && length(old_ad)) { # extract adterms from the original formula old_ad <- formula(paste("~", old_ad)) old_ad_terms <- attr(terms(old_ad), "term.labels") old_adnames <- get_matches("^[^\\(]+", old_ad_terms) old_adnames <- sub("^resp_", "", old_adnames) new_adnames <- get_matches("^[^\\(]+", new_ad_terms) new_adnames <- sub("^resp_", "", new_adnames) # keep unmatched adterms of the original formula keep <- !old_adnames %in% new_adnames new_ad_terms <- c(old_ad_terms[keep], new_ad_terms) } if (length(new_ad_terms)) { new_ad_terms <- paste(new_ad_terms, collapse = "+") new_ad_terms <- paste("|", new_ad_terms) } resp <- gsub("\\|.+", "", deparse_combine(formula[[2]])) out <- formula(paste(resp, new_ad_terms, "~1")) out[[3]] <- formula[[3]] attributes(out) <- attributes(formula) out } #' @export print.brmsformula <- function(x, wsp = 0, digits = 2, ...) { cat(formula2str(x$formula, space = "trim"), "\n") str_wsp <- collapse(rep(" ", wsp)) autocor <- attr(x$formula, "autocor") if (!is.null(autocor)) { autocor <- formula2str(autocor, rm = 1, space = "trim") cat(paste0(str_wsp, "autocor ~ ", autocor, "\n")) } pforms <- x$pforms if (length(pforms)) { pforms <- ulapply(pforms, formula2str, space = "trim") cat(collapse(str_wsp, pforms, "\n")) } pfix <- x$pfix if (length(pfix)) { pfix <- lapply(pfix, function(x) ifelse(is.numeric(x), round(x, digits), x) ) pfix <- paste0(names(pfix), " = ", unlist(pfix)) cat(collapse(str_wsp, pfix, "\n")) } invisible(x) } #' @export print.mvbrmsformula <- function(x, wsp = 0, ...) { for (i in seq_along(x$forms)) { if (i > 1) cat(collapse(rep(" ", wsp))) print(x$forms[[i]], wsp = wsp, ...) 
} invisible(x) } #' Checks if argument is a \code{brmsformula} object #' #' @param x An \R object #' #' @export is.brmsformula <- function(x) { inherits(x, "brmsformula") } #' Checks if argument is a \code{mvbrmsformula} object #' #' @param x An \R object #' #' @export is.mvbrmsformula <- function(x) { inherits(x, "mvbrmsformula") } is_nonlinear <- function(x) { stopifnot(is.brmsfit(x)) get_nl(bf(x$formula)) } warn_dpar <- function(dpar) { # argument 'dpar' in formula helper functions is deprecated as of 2.3.7 if (!is.null(dpar)) { warning2("Argument 'dpar' is no longer necessary and ignored.") } NULL } # return the right-hand side of a formula rhs <- function(x) { attri <- attributes(x) x <- as.formula(x) x <- if (length(x) == 3) x[-2] else x do_call(structure, c(list(x), attri)) } # return the left-hand side of a formula lhs <- function(x) { x <- as.formula(x) if (length(x) == 3L) update(x, . ~ 1) else NULL } # convert a string to a formula # @param x vector of strings to be converted # @param ... passed to formula() str2formula <- function(x, ..., collapse = "+") { has_chars <- nzchar(x) if (length(x) && any(has_chars)) { out <- paste(x[has_chars], collapse = collapse) } else { out <- "1" } out <- formula(paste("~", out), ...) environment(out) <- parent.frame() out } # convert a formula to a character string # @param formula a model formula # @param rm a vector of to elements indicating how many characters # should be removed at the beginning and end of the string respectively # @param space how should whitespaces be treated? # option 'rm' is dangerous as it may combine different operators (#1142) # @return a single character string or NULL formula2str <- function(formula, rm = c(0, 0), space = c("trim", "rm")) { if (is.null(formula)) { return(NULL) } formula <- as.formula(formula) space <- match.arg(space) if (anyNA(rm[2])) rm[2] <- 0 x <- Reduce(paste, deparse(formula)) x <- gsub("[\t\r\n]+", " ", x, perl = TRUE) if (space == "trim") { x <- trim_wsp(x) } else { x <- rm_wsp(x) } substr(x, 1 + rm[1], nchar(x) - rm[2]) } # right-hand side of a formula as a character string str_rhs <- function(x) { formula2str(rhs(x), rm = c(1, 0)) } # left-hand side of a formula as a character string str_lhs <- function(x) { formula2str(lhs(x), rm = c(0, 2)) } is.formula <- function(x) { inherits(x, "formula") } # wrapper around as.formula with additional checks as_formula <- function(x) { x <- as.formula(x) # fixes issue #749 rhs <- rhs(x)[[2]] if (isTRUE(is.call(rhs) && rhs[[1]] == "~")) { stop2("Nested formulas are not allowed. Did you use '~~' somewhere?") } x } # expand the '.' variable in formula using stats::terms expand_dot_formula <- function(formula, data = NULL) { if (isTRUE("." %in% all.vars(formula))) { att <- attributes(formula) try_terms <- try( stats::terms(formula, data = data), silent = TRUE ) if (!is(try_terms, "try-error")) { formula <- formula(try_terms) } attributes(formula) <- att } formula } brms/R/kfold.R0000644000175000017500000003334714136566245013020 0ustar nileshnilesh#' K-Fold Cross-Validation #' #' Perform exact K-fold cross-validation by refitting the model \eqn{K} #' times each leaving out one-\eqn{K}th of the original data. #' Folds can be run in parallel using the \pkg{future} package. #' #' @aliases kfold #' #' @inheritParams loo.brmsfit #' @param K The number of subsets of equal (if possible) size #' into which the data will be partitioned for performing #' \eqn{K}-fold cross-validation. 
The model is refit \code{K} times, each time #' leaving out one of the \code{K} subsets. If \code{K} is equal to the total #' number of observations in the data then \eqn{K}-fold cross-validation is #' equivalent to exact leave-one-out cross-validation. #' @param Ksub Optional number of subsets (of those subsets defined by \code{K}) #' to be evaluated. If \code{NULL} (the default), \eqn{K}-fold cross-validation #' will be performed on all subsets. If \code{Ksub} is a single integer, #' \code{Ksub} subsets (out of all \code{K}) subsets will be randomly chosen. #' If \code{Ksub} consists of multiple integers or a one-dimensional array #' (created via \code{as.array}) potentially of length one, the corresponding #' subsets will be used. This argument is primarily useful, if evaluation of #' all subsets is infeasible for some reason. #' @param folds Determines how the subsets are being constructed. #' Possible values are \code{NULL} (the default), \code{"stratified"}, #' \code{"grouped"}, or \code{"loo"}. May also be a vector of length #' equal to the number of observations in the data. Alters the way #' \code{group} is handled. More information is provided in the 'Details' #' section. #' @param group Optional name of a grouping variable or factor in the model. #' What exactly is done with this variable depends on argument \code{folds}. #' More information is provided in the 'Details' section. #' @param exact_loo Deprecated! Please use \code{folds = "loo"} instead. #' @param save_fits If \code{TRUE}, a component \code{fits} is added to #' the returned object to store the cross-validated \code{brmsfit} #' objects and the indices of the omitted observations for each fold. #' Defaults to \code{FALSE}. #' #' @return \code{kfold} returns an object that has a similar structure as the #' objects returned by the \code{loo} and \code{waic} methods and #' can be used with the same post-processing functions. #' #' @details The \code{kfold} function performs exact \eqn{K}-fold #' cross-validation. First the data are partitioned into \eqn{K} folds #' (i.e. subsets) of equal (or as close to equal as possible) size by default. #' Then the model is refit \eqn{K} times, each time leaving out one of the #' \code{K} subsets. If \eqn{K} is equal to the total number of observations #' in the data then \eqn{K}-fold cross-validation is equivalent to exact #' leave-one-out cross-validation (to which \code{loo} is an efficient #' approximation). The \code{compare_ic} function is also compatible with #' the objects returned by \code{kfold}. #' #' The subsets can be constructed in multiple different ways: #' \itemize{ #' \item If both \code{folds} and \code{group} are \code{NULL}, the subsets #' are randomly chosen so that they have equal (or as close to equal as #' possible) size. #' \item If \code{folds} is \code{NULL} but \code{group} is specified, the #' data is split up into subsets, each time omitting all observations of one #' of the factor levels, while ignoring argument \code{K}. #' \item If \code{folds = "stratified"} the subsets are stratified after #' \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_stratified}}. #' \item If \code{folds = "grouped"} the subsets are split by #' \code{group} using \code{\link[loo:kfold-helpers]{loo::kfold_split_grouped}}. #' \item If \code{folds = "loo"} exact leave-one-out cross-validation #' will be performed and \code{K} will be ignored. 
Further, if \code{group} #' is specified, all observations corresponding to the factor level of the #' currently predicted single value are omitted. Thus, in this case, the #' predicted values are only a subset of the omitted ones. #' \item If \code{folds} is a numeric vector, it must contain one element per #' observation in the data. Each element of the vector is an integer in #' \code{1:K} indicating to which of the \code{K} folds the corresponding #' observation belongs. There are some convenience functions available in #' the \pkg{loo} package that create integer vectors to use for this purpose #' (see the Examples section below and also the #' \link[loo:kfold-helpers]{kfold-helpers} page). #' } #' #' @examples #' \dontrun{ #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient) + (1|obs), #' data = epilepsy, family = poisson()) #' # throws warning about some pareto k estimates being too high #' (loo1 <- loo(fit1)) #' # perform 10-fold cross validation #' (kfold1 <- kfold(fit1, chains = 1)) #' #' # use the future package for parallelization #' library(future) #' plan(multiprocess) #' kfold(fit1, chains = 1) #' } #' #' @seealso \code{\link{loo}}, \code{\link{reloo}} #' #' @importFrom loo kfold #' @export kfold #' @export kfold.brmsfit <- function(x, ..., K = 10, Ksub = NULL, folds = NULL, group = NULL, exact_loo = NULL, compare = TRUE, resp = NULL, model_names = NULL, save_fits = FALSE) { args <- split_dots(x, ..., model_names = model_names) use_stored <- ulapply(args$models, function(x) is_equal(x$kfold$K, K)) if (!is.null(exact_loo) && as_one_logical(exact_loo)) { warning2("'exact_loo' is deprecated. Please use folds = 'loo' instead.") folds <- "loo" } c(args) <- nlist( criterion = "kfold", K, Ksub, folds, group, compare, resp, save_fits, use_stored ) do_call(compute_loolist, args) } # helper function to perform k-fold cross-validation # @inheritParams kfold.brmsfit # @param model_name ignored but included to avoid being passed to '...' .kfold <- function(x, K, Ksub, folds, group, save_fits, newdata, resp, model_name, newdata2 = NULL, ...) { stopifnot(is.brmsfit(x)) if (is.brmsfit_multiple(x)) { warn_brmsfit_multiple(x) class(x) <- "brmsfit" } if (is.null(newdata)) { newdata <- x$data } else { newdata <- as.data.frame(newdata) } if (is.null(newdata2)) { newdata2 <- x$data2 } else { bterms <- brmsterms(x$formula) newdata2 <- validate_data2(newdata2, bterms) } N <- nrow(newdata) # validate argument 'group' if (!is.null(group)) { valid_groups <- get_cat_vars(x) if (length(group) != 1L || !group %in% valid_groups) { stop2("Group '", group, "' is not a valid grouping factor. 
", "Valid groups are: \n", collapse_comma(valid_groups)) } gvar <- factor(get(group, newdata)) } # validate argument 'folds' if (is.null(folds)) { if (is.null(group)) { fold_type <- "random" folds <- loo::kfold_split_random(K, N) } else { fold_type <- "group" folds <- as.numeric(gvar) K <- length(levels(gvar)) message("Setting 'K' to the number of levels of '", group, "' (", K, ")") } } else if (is.character(folds) && length(folds) == 1L) { opts <- c("loo", "stratified", "grouped") fold_type <- match.arg(folds, opts) req_group_opts <- c("stratified", "grouped") if (fold_type %in% req_group_opts && is.null(group)) { stop2("Argument 'group' is required for fold type '", fold_type, "'.") } if (fold_type == "loo") { folds <- seq_len(N) K <- N message("Setting 'K' to the number of observations (", K, ")") } else if (fold_type == "stratified") { folds <- loo::kfold_split_stratified(K, gvar) } else if (fold_type == "grouped") { folds <- loo::kfold_split_grouped(K, gvar) } } else { fold_type <- "custom" folds <- as.numeric(factor(folds)) if (length(folds) != N) { stop2("If 'folds' is a vector, it must be of length N.") } K <- max(folds) message("Setting 'K' to the number of folds (", K, ")") } # validate argument 'Ksub' if (is.null(Ksub)) { Ksub <- seq_len(K) } else { # see issue #441 for reasons to check for arrays is_array_Ksub <- is.array(Ksub) Ksub <- as.integer(Ksub) if (any(Ksub <= 0 | Ksub > K)) { stop2("'Ksub' must contain positive integers not larger than 'K'.") } if (length(Ksub) == 1L && !is_array_Ksub) { Ksub <- sample(seq_len(K), Ksub) } else { Ksub <- unique(Ksub) } Ksub <- sort(Ksub) } # split dots for use in log_lik and update dots <- list(...) ll_arg_names <- arg_names("log_lik") ll_args <- dots[intersect(names(dots), ll_arg_names)] ll_args$allow_new_levels <- TRUE ll_args$resp <- resp ll_args$combine <- TRUE up_args <- dots[setdiff(names(dots), ll_arg_names)] up_args$refresh <- 0 # function to be run inside future::future .kfold_k <- function(k) { if (fold_type == "loo" && !is.null(group)) { omitted <- which(folds == folds[k]) predicted <- k } else { omitted <- predicted <- which(folds == k) } newdata_omitted <- newdata[-omitted, , drop = FALSE] fit <- x up_args$object <- fit up_args$newdata <- newdata_omitted up_args$data2 <- subset_data2(newdata2, -omitted) fit <- SW(do_call(update, up_args)) ll_args$object <- fit ll_args$newdata <- newdata[predicted, , drop = FALSE] ll_args$newdata2 <- subset_data2(newdata2, predicted) lppds <- do_call(log_lik, ll_args) out <- nlist(lppds, omitted, predicted) if (save_fits) out$fit <- fit return(out) } futures <- vector("list", length(Ksub)) lppds <- obs_order <- vector("list", length(Ksub)) if (save_fits) { fits <- array(list(), dim = c(length(Ksub), 3)) dimnames(fits) <- list(NULL, c("fit", "omitted", "predicted")) } x <- recompile_model(x) for (k in Ksub) { ks <- match(k, Ksub) message("Fitting model ", k, " out of ", K) futures[[ks]] <- future::future( .kfold_k(k), packages = "brms", seed = TRUE ) } for (k in Ksub) { ks <- match(k, Ksub) tmp <- future::value(futures[[ks]]) if (save_fits) { fits[ks, ] <- tmp[c("fit", "omitted", "predicted")] } obs_order[[ks]] <- tmp$predicted lppds[[ks]] <- tmp$lppds } lppds <- do_call(cbind, lppds) elpds <- apply(lppds, 2, log_mean_exp) # make sure elpds are put back in the right order obs_order <- unlist(obs_order) elpds <- elpds[order(obs_order)] # compute effective number of parameters ll_args$object <- x ll_args$newdata <- newdata ll_args$newdata2 <- newdata2 ll_full <- do_call(log_lik, ll_args) lpds 
<- apply(ll_full, 2, log_mean_exp) ps <- lpds - elpds # put everything together in a loo object pointwise <- cbind(elpd_kfold = elpds, p_kfold = ps, kfoldic = -2 * elpds) est <- colSums(pointwise) se_est <- sqrt(nrow(pointwise) * apply(pointwise, 2, var)) estimates <- cbind(Estimate = est, SE = se_est) rownames(estimates) <- colnames(pointwise) out <- nlist(estimates, pointwise) atts <- nlist(K, Ksub, group, folds, fold_type) attributes(out)[names(atts)] <- atts if (save_fits) { out$fits <- fits out$data <- newdata } structure(out, class = c("kfold", "loo")) } #' Predictions from K-Fold Cross-Validation #' #' Compute and evaluate predictions after performing K-fold #' cross-validation via \code{\link{kfold}}. #' #' @param x Object of class \code{'kfold'} computed by \code{\link{kfold}}. #' For \code{kfold_predict} to work, the fitted model objects need to have #' been stored via argument \code{save_fits} of \code{\link{kfold}}. #' @param method The method used to make predictions. Either \code{"predict"} #' or \code{"fitted"}. See \code{\link{predict.brmsfit}} for details. #' @inheritParams predict.brmsfit #' #' @return A \code{list} with two slots named \code{'y'} and \code{'yrep'}. #' Slot \code{y} contains the vector of observed responses. #' Slot \code{yrep} contains the matrix of predicted responses, #' with rows being posterior draws and columns being observations. #' #' @seealso \code{\link{kfold}} #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' #' # perform k-fold cross validation #' (kf <- kfold(fit, save_fits = TRUE, chains = 1)) #' #' # define a loss function #' rmse <- function(y, yrep) { #' yrep_mean <- colMeans(yrep) #' sqrt(mean((yrep_mean - y)^2)) #' } #' #' # predict responses and evaluate the loss #' kfp <- kfold_predict(kf) #' rmse(y = kfp$y, yrep = kfp$yrep) #' } #' #' @export kfold_predict <- function(x, method = c("predict", "fitted"), resp = NULL, ...) { if (!inherits(x, "kfold")) { stop2("'x' must be a 'kfold' object.") } if (!all(c("fits", "data") %in% names(x))) { stop2( "Slots 'fits' and 'data' are required. ", "Please run kfold with 'save_fits = TRUE'." ) } method <- get(match.arg(method), mode = "function") resp <- validate_resp(resp, x$fits[[1, "fit"]], multiple = FALSE) all_predicted <- as.character(sort(unlist(x$fits[, "predicted"]))) npredicted <- length(all_predicted) ndraws <- ndraws(x$fits[[1, "fit"]]) y <- rep(NA, npredicted) yrep <- matrix(NA, nrow = ndraws, ncol = npredicted) names(y) <- colnames(yrep) <- all_predicted for (k in seq_rows(x$fits)) { fit_k <- x$fits[[k, "fit"]] predicted_k <- x$fits[[k, "predicted"]] obs_names <- as.character(predicted_k) newdata <- x$data[predicted_k, , drop = FALSE] y[obs_names] <- get_y(fit_k, resp, newdata = newdata, ...) yrep[, obs_names] <- method( fit_k, newdata = newdata, resp = resp, allow_new_levels = TRUE, summary = FALSE, ... ) } nlist(y, yrep) } brms/R/plot.R0000644000175000017500000002432614111751666012670 0ustar nileshnilesh#' Trace and Density Plots for MCMC Draws #' #' @param x An object of class \code{brmsfit}. #' @param pars Deprecated alias of \code{variable}. #' Names of the parameters to plot, as given by a #' character vector or a regular expression. #' @param variable Names of the variables (parameters) to plot, as given by a #' character vector or a regular expression (if \code{regex = TRUE}). By #' default, a hopefully not too large selection of variables is plotted. 
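#'   For instance, \code{variable = "^b_"} together with \code{regex = TRUE}
#'   selects all population-level effects (as shown in the examples below).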
#' @param combo A character vector with at least two elements. #' Each element of \code{combo} corresponds to a column in the resulting #' graphic and should be the name of one of the available #' \code{\link[bayesplot:MCMC-overview]{MCMC}} functions #' (omitting the \code{mcmc_} prefix). #' @param N The number of parameters plotted per page. #' @param theme A \code{\link[ggplot2:theme]{theme}} object #' modifying the appearance of the plots. #' For some basic themes see \code{\link[ggplot2:ggtheme]{ggtheme}} #' and \code{\link[bayesplot:theme_default]{theme_default}}. #' @param regex Logical; Indicates whether \code{variable} should #' be treated as regular expressions. Defaults to \code{FALSE}. #' @param fixed (Deprecated) Indicates whether parameter names #' should be matched exactly (\code{TRUE}) or treated as #' regular expressions (\code{FALSE}). Default is \code{FALSE} #' and only works with argument \code{pars}. #' @param plot Logical; indicates if plots should be #' plotted directly in the active graphic device. #' Defaults to \code{TRUE}. #' @param ask Logical; indicates if the user is prompted #' before a new page is plotted. #' Only used if \code{plot} is \code{TRUE}. #' @param newpage Logical; indicates if the first set of plots #' should be plotted to a new page. #' Only used if \code{plot} is \code{TRUE}. #' @param ... Further arguments passed to #' \code{\link[bayesplot:MCMC-combos]{mcmc_combo}}. #' #' @return An invisible list of #' \code{\link[gtable:gtable]{gtable}} objects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|visit), #' data = epilepsy, family = "poisson") #' plot(fit) #' ## plot population-level effects only #' plot(fit, variable = "^b_", regex = TRUE) #' } #' #' @method plot brmsfit #' @import ggplot2 #' @importFrom graphics plot #' @importFrom grDevices devAskNewPage #' @export plot.brmsfit <- function(x, pars = NA, combo = c("dens", "trace"), N = 5, variable = NULL, regex = FALSE, fixed = FALSE, theme = NULL, plot = TRUE, ask = TRUE, newpage = TRUE, ...) { contains_draws(x) if (!is_wholenumber(N) || N < 1) { stop2("Argument 'N' must be a positive integer.") } variable <- use_variable_alias(variable, x, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(x) regex <- TRUE } draws <- as.array(x, variable = variable, regex = regex) variables <- dimnames(draws)[[3]] if (!length(variables)) { stop2("No valid variables selected.") } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } n_plots <- ceiling(length(variables) / N) plots <- vector(mode = "list", length = n_plots) for (i in seq_len(n_plots)) { sub_vars <- variables[((i - 1) * N + 1):min(i * N, length(variables))] sub_draws <- draws[, , sub_vars, drop = FALSE] plots[[i]] <- bayesplot::mcmc_combo( sub_draws, combo = combo, gg_theme = theme, ... ) if (plot) { plot(plots[[i]], newpage = newpage || i > 1) if (i == 1) { devAskNewPage(ask = ask) } } } invisible(plots) } # list all parameter classes to be included in plots by default default_plot_variables <- function(family) { c(fixef_pars(), "^sd_", "^cor_", "^sigma_", "^rescor_", paste0("^", valid_dpars(family), "$"), "^delta$", "^theta", "^ar", "^ma", "^arr", "^sderr", "^lagsar", "^errorsar", "^car", "^sdcar", "^sds_", "^sdgp_", "^lscale_") } #' MCMC Plots Implemented in \pkg{bayesplot} #' #' Convenient way to call MCMC plotting functions #' implemented in the \pkg{bayesplot} package. 
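#' For instance, \code{mcmc_plot(fit, type = "trace")} produces trace plots of
#' the posterior draws, where \code{fit} stands for any \code{brmsfit} object
#' (see argument \code{type} for the available plot types).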
#' #' @aliases stanplot stanplot.brmsfit #' #' @inheritParams plot.brmsfit #' @param object An \R object typically of class \code{brmsfit} #' @param type The type of the plot. #' Supported types are (as names) \code{hist}, \code{dens}, #' \code{hist_by_chain}, \code{dens_overlay}, #' \code{violin}, \code{intervals}, \code{areas}, \code{acf}, #' \code{acf_bar},\code{trace}, \code{trace_highlight}, \code{scatter}, #' \code{rhat}, \code{rhat_hist}, \code{neff}, \code{neff_hist} #' \code{nuts_acceptance}, \code{nuts_divergence}, #' \code{nuts_stepsize}, \code{nuts_treedepth}, and \code{nuts_energy}. #' For an overview on the various plot types see #' \code{\link[bayesplot:MCMC-overview]{MCMC-overview}}. #' @param ... Additional arguments passed to the plotting functions. #' See \code{\link[bayesplot:MCMC-overview]{MCMC-overview}} for #' more details. #' #' @return A \code{\link[ggplot2:ggplot]{ggplot}} object #' that can be further customized using the \pkg{ggplot2} package. #' #' @details #' Also consider using the \pkg{shinystan} package available via #' method \code{\link{launch_shinystan}} in \pkg{brms} for flexible #' and interactive visual analysis. #' #' @examples #' \dontrun{ #' model <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' #' # plot posterior intervals #' mcmc_plot(model) #' #' # only show population-level effects in the plots #' mcmc_plot(model, variable = "^b_", regex = TRUE) #' #' # show histograms of the posterior distributions #' mcmc_plot(model, type = "hist") #' #' # plot some diagnostics of the sampler #' mcmc_plot(model, type = "neff") #' mcmc_plot(model, type = "rhat") #' #' # plot some diagnostics specific to the NUTS sampler #' mcmc_plot(model, type = "nuts_acceptance") #' mcmc_plot(model, type = "nuts_divergence") #' } #' #' @export mcmc_plot.brmsfit <- function(object, pars = NA, type = "intervals", variable = NULL, regex = FALSE, fixed = FALSE, ...) { contains_draws(object) object <- restructure(object) type <- as_one_character(type) variable <- use_variable_alias(variable, object, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(object) regex <- TRUE } valid_types <- as.character(bayesplot::available_mcmc("")) valid_types <- sub("^mcmc_", "", valid_types) if (!type %in% valid_types) { stop2("Invalid plot type. Valid plot types are: \n", collapse_comma(valid_types)) } mcmc_fun <- get(paste0("mcmc_", type), asNamespace("bayesplot")) mcmc_arg_names <- names(formals(mcmc_fun)) mcmc_args <- list(...) 
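  # Orientation for the argument filling below (descriptive comments only):
  # - for "nuts_*" types, 'x' is set to nuts_params(object); otherwise 'x'
  #   becomes an array of posterior draws of the selected variables
  # - 'lp' and 'ratio' are supplied via log_posterior() and neff_ratio()
  #   whenever the chosen bayesplot function has such arguments
  # - 'np' (NUTS parameters) is only added for models fit with the NUTS
  #   sampler, and 'rhat' is skipped for interval/area type plots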
if ("x" %in% mcmc_arg_names) { if (grepl("^nuts_", type)) { # x refers to a molten data.frame of NUTS parameters mcmc_args$x <- nuts_params(object) } else { # x refers to a data.frame of draws draws <- as.array(object, variable = variable, regex = regex) if (!length(draws)) { stop2("No valid parameters selected.") } sel_variables <- dimnames(draws)[[3]] if (type %in% c("scatter", "hex") && length(sel_variables) != 2L) { stop2("Exactly 2 parameters must be selected for this type.", "\nParameters selected: ", collapse_comma(sel_variables)) } mcmc_args$x <- draws } } if ("lp" %in% mcmc_arg_names) { mcmc_args$lp <- log_posterior(object) } use_nuts <- isTRUE(object$algorithm == "sampling") if ("np" %in% mcmc_arg_names && use_nuts) { mcmc_args$np <- nuts_params(object) } interval_type <- type %in% c("intervals", "areas") if ("rhat" %in% mcmc_arg_names && !interval_type) { mcmc_args$rhat <- rhat(object) } if ("ratio" %in% mcmc_arg_names) { mcmc_args$ratio <- neff_ratio(object) } do_call(mcmc_fun, mcmc_args) } #' @rdname mcmc_plot.brmsfit #' @export mcmc_plot <- function(object, ...) { UseMethod("mcmc_plot") } # 'stanplot' has been deprecated in brms 2.10.6; remove in brms 3.0 #' @export stanplot <- function(object, ...) { UseMethod("stanplot") } #' @export stanplot.brmsfit <- function(object, ...) { warning2("Method 'stanplot' is deprecated. Please use 'mcmc_plot' instead.") mcmc_plot.brmsfit(object, ...) } #' Create a matrix of output plots from a \code{brmsfit} object #' #' A \code{\link[graphics:pairs]{pairs}} #' method that is customized for MCMC output. #' #' @param x An object of class \code{brmsfit} #' @inheritParams plot.brmsfit #' @param ... Further arguments to be passed to #' \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. #' #' @details For a detailed description see #' \code{\link[bayesplot:MCMC-scatterplots]{mcmc_pairs}}. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|visit), #' data = epilepsy, family = "poisson") #' pairs(fit, variable = variables(fit)[1:3]) #' pairs(fit, variable = "^sd_", regex = TRUE) #' } #' #' @export pairs.brmsfit <- function(x, pars = NA, variable = NULL, regex = FALSE, fixed = FALSE, ...) { variable <- use_variable_alias(variable, x, pars, fixed = fixed) if (is.null(variable)) { variable <- default_plot_variables(x) regex <- TRUE } draws <- as.array(x, variable = variable, regex = regex) bayesplot::mcmc_pairs(draws, ...) } #' Default \pkg{bayesplot} Theme for \pkg{ggplot2} Graphics #' #' This theme is imported from the \pkg{bayesplot} package. #' See \code{\link[bayesplot:theme_default]{theme_default}} #' for a complete documentation. #' #' @name theme_default #' #' @param base_size base font size #' @param base_family base font family #' #' @return A \code{theme} object used in \pkg{ggplot2} graphics. #' #' @importFrom bayesplot theme_default #' @export theme_default NULL brms/R/formula-sp.R0000644000175000017500000004767614111751666014014 0ustar nileshnilesh# This file contains functions dealing with the extended # formula syntax to specify special effects terms #' Predictors with Measurement Error in \pkg{brms} Models #' #' (Soft deprecated) Specify predictors with measurement error. The function #' does not evaluate its arguments -- it exists purely to help set up a model. #' #' @param x The variable measured with error. #' @param sdx Known measurement error of \code{x} #' treated as standard deviation. 
#' @param gr Optional grouping factor to specify which #' values of \code{x} correspond to the same value of the #' latent variable. If \code{NULL} (the default) each #' observation will have its own value of the latent variable. #' #' @details #' For detailed documentation see \code{help(brmsformula)}. #' \code{me} terms are soft deprecated in favor of the more #' general and consistent \code{\link{mi}} terms. #' By default, latent noise-free variables are assumed #' to be correlated. To change that, add \code{set_mecor(FALSE)} #' to your model formula object (see examples). #' #' @seealso #' \code{\link{brmsformula}}, \code{\link{brmsformula-helpers}} #' #' @examples #' \dontrun{ #' # sample some data #' N <- 100 #' dat <- data.frame( #' y = rnorm(N), x1 = rnorm(N), #' x2 = rnorm(N), sdx = abs(rnorm(N, 1)) #' ) #' # fit a simple error-in-variables model #' fit1 <- brm(y ~ me(x1, sdx) + me(x2, sdx), data = dat, #' save_pars = save_pars(latent = TRUE)) #' summary(fit1) #' #' # turn off modeling of correlations #' bform <- bf(y ~ me(x1, sdx) + me(x2, sdx)) + set_mecor(FALSE) #' fit2 <- brm(bform, data = dat, save_pars = save_pars(latent = TRUE)) #' summary(fit2) #' } #' #' @export me <- function(x, sdx, gr = NULL) { # use 'term' for consistency with other special terms term <- deparse(substitute(x)) sdx <- deparse(substitute(sdx)) gr <- substitute(gr) if (!is.null(gr)) { gr <- deparse_combine(gr) stopif_illegal_group(gr) } else { gr <- "" } label <- deparse(match.call()) out <- nlist(term, sdx, gr, label) class(out) <- c("me_term", "sp_term") out } #' Predictors with Missing Values in \pkg{brms} Models #' #' Specify predictor term with missing values in \pkg{brms}. The function does #' not evaluate its arguments -- it exists purely to help set up a model. #' #' @param x The variable containing missing values. #' @param idx An optional variable containing indices of observations in `x` #' that are to be used in the model. This is mostly relevant in partially #' subsetted models (via \code{resp_subset}) but may also have other #' applications that I haven't thought of. #' #' @details For detailed documentation see \code{help(brmsformula)}. 
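#' A typical use case is \code{bf(bmi | mi() ~ age * mi(chl))}, where
#' \code{mi(chl)} refers to the partly missing predictor \code{chl}, which is
#' itself modeled as a response in a second formula (see the examples below;
#' the variable names stem from the \code{nhanes} data used there).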
#' #' @seealso \code{\link{brmsformula}} #' #' @examples #' \dontrun{ #' data("nhanes", package = "mice") #' N <- nrow(nhanes) #' #' # simple model with missing data #' bform1 <- bf(bmi | mi() ~ age * mi(chl)) + #' bf(chl | mi() ~ age) + #' set_rescor(FALSE) #' #' fit1 <- brm(bform1, data = nhanes) #' #' summary(fit1) #' plot(conditional_effects(fit1, resp = "bmi"), ask = FALSE) #' loo(fit1, newdata = na.omit(fit1$data)) #' #' # simulate some measurement noise #' nhanes$se <- rexp(N, 2) #' #' # measurement noise can be handled within 'mi' terms #' # with or without the presence of missing values #' bform2 <- bf(bmi | mi() ~ age * mi(chl)) + #' bf(chl | mi(se) ~ age) + #' set_rescor(FALSE) #' #' fit2 <- brm(bform2, data = nhanes) #' #' summary(fit2) #' plot(conditional_effects(fit2, resp = "bmi"), ask = FALSE) #' #' # 'mi' terms can also be used when some responses are subsetted #' nhanes$sub <- TRUE #' nhanes$sub[1:2] <- FALSE #' nhanes$id <- 1:N #' nhanes$idx <- sample(3:N, N, TRUE) #' #' # this requires the addition term 'index' being specified #' # in the subsetted part of the model #' bform3 <- bf(bmi | mi() ~ age * mi(chl, idx)) + #' bf(chl | mi(se) + subset(sub) + index(id) ~ age) + #' set_rescor(FALSE) #' #' fit3 <- brm(bform3, data = nhanes) #' #' summary(fit3) #' plot(conditional_effects(fit3, resp = "bmi"), ask = FALSE) #' } #' #' @export mi <- function(x, idx = NA) { # use 'term' for consistency with other special terms term <- deparse(substitute(x)) term_vars <- all_vars(term) if (!is_equal(term, term_vars)) { stop2("'mi' only accepts single untransformed variables.") } idx <- deparse(substitute(idx)) if (idx != "NA") { idx_vars <- all_vars(idx) if (!is_equal(idx, idx_vars)) { stop2("'mi' only accepts single untransformed variables.") } } label <- deparse(match.call()) out <- nlist(term, idx, label) class(out) <- c("mi_term", "sp_term") out } #' Monotonic Predictors in \pkg{brms} Models #' #' Specify a monotonic predictor term in \pkg{brms}. The function does not #' evaluate its arguments -- it exists purely to help set up a model. #' #' @param x An integer variable or an ordered factor to be modeled as monotonic. #' @param id Optional character string. All monotonic terms #' with the same \code{id} within one formula will be modeled as #' having the same simplex (shape) parameter vector. If all monotonic terms #' of the same predictor have the same \code{id}, the resulting #' predictions will be conditionally monotonic for all values of #' interacting covariates (Bürkner & Charpentier, 2020). #' #' @details See Bürkner and Charpentier (2020) for the underlying theory. For #' detailed documentation of the formula syntax used for monotonic terms, #' see \code{help(brmsformula)} as well as \code{vignette("brms_monotonic")}. #' #' @seealso \code{\link{brmsformula}} #' #' @references #' Bürkner P. C. & Charpentier E. (2020). Modeling Monotonic Effects of Ordinal #' Predictors in Regression Models. British Journal of Mathematical and #' Statistical Psychology. 
doi:10.1111/bmsp.12195 #' #' @examples #' \dontrun{ #' # generate some data #' income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") #' income <- factor(sample(income_options, 100, TRUE), #' levels = income_options, ordered = TRUE) #' mean_ls <- c(30, 60, 70, 75) #' ls <- mean_ls[income] + rnorm(100, sd = 7) #' dat <- data.frame(income, ls) #' #' # fit a simple monotonic model #' fit1 <- brm(ls ~ mo(income), data = dat) #' summary(fit1) #' plot(fit1, N = 6) #' plot(conditional_effects(fit1), points = TRUE) #' #' # model interaction with other variables #' dat$x <- sample(c("a", "b", "c"), 100, TRUE) #' fit2 <- brm(ls ~ mo(income)*x, data = dat) #' summary(fit2) #' plot(conditional_effects(fit2), points = TRUE) #' #' # ensure conditional monotonicity #' fit3 <- brm(ls ~ mo(income, id = "i")*x, data = dat) #' summary(fit3) #' plot(conditional_effects(fit3), points = TRUE) #' } #' #' @export mo <- function(x, id = NA) { # use 'term' for consistency with other special terms term <- deparse(substitute(x)) id <- as_one_character(id, allow_na = TRUE) label <- deparse(match.call()) out <- nlist(term, id, label) class(out) <- c("mo_term", "sp_term") out } # find variable names for which to keep NAs vars_keep_na <- function(x, ...) { UseMethod("vars_keep_na") } #' @export vars_keep_na.mvbrmsterms <- function(x, ...) { resps <- get_element(x, "respform") resps <- ulapply(resps, terms_resp, check_names = FALSE) out <- lapply(x$terms, vars_keep_na, responses = resps, ...) vars_mi <- unique(ulapply(out, attr, "vars_mi")) out <- unique(unlist(out)) miss_mi <- setdiff(vars_mi, out) if (length(miss_mi)) { stop2( "Response models of variables in 'mi' terms require " , "specification of the addition argument 'mi'. See ?mi. ", "Error occurred for ", collapse_comma(miss_mi), "." ) } out } #' @export vars_keep_na.brmsterms <- function(x, responses = NULL, ...) { out <- character(0) if (is.formula(x$adforms$mi)) { mi_respcall <- terms_resp(x$respform, check_names = FALSE) mi_respvars <- all_vars(mi_respcall) mi_advars <- all_vars(x$adforms$mi) c(out) <- unique(c(mi_respcall, mi_respvars, mi_advars)) } if (is.formula(x$adforms$cens)) { y2_expr <- get_ad_expr(x, "cens", "y2", type = "vars") c(out) <- all_vars(y2_expr) } uni_mi <- ulapply(get_effect(x, "sp"), attr, "uni_mi") if (length(uni_mi)) { vars_mi <- ulapply(uni_mi, function(term) eval2(term)$term) miss_mi <- setdiff(vars_mi, responses) if (length(miss_mi)) { stop2( "Variables in 'mi' terms should also be specified " , "as response variables in the model. See ?mi. ", "Error occurred for ", collapse_comma(miss_mi), "." 
) } attr(out, "vars_mi") <- vars_mi } out } # extract unique names of noise-free terms get_uni_me <- function(x) { uni_me <- ulapply(get_effect(x, "sp"), attr, "uni_me") if (!length(uni_me)) { return(NULL) } xname <- ulapply(uni_me, function(term) eval2(term)$term) df <- data.frame(xname, uni_me) df <- df[!duplicated(df), ] xdupl <- df$xname[duplicated(df$xname)] if (length(xdupl)) { calls <- df$uni_me[df$xname == xdupl[1]] stop2( "Variable '", xdupl[1], "' is used in different calls to 'me'.\n", "Associated calls are: ", collapse_comma(calls) ) } unique(uni_me) } # save all me-terms within a tidy data.frame tidy_meef <- function(bterms, data, old_levels = NULL) { uni_me <- get_uni_me(bterms) if (!length(uni_me)) { return(empty_meef()) } if (has_subset(bterms)) { # 'Xme' variables need to be the same across univariate models stop2("Argument 'subset' is not supported when using 'me' terms.") } out <- data.frame( term = uni_me, xname = "", grname = "", stringsAsFactors = FALSE ) levels <- vector("list", nrow(out)) for (i in seq_rows(out)) { tmp <- eval2(out$term[i]) out$xname[i] <- tmp$term if (isTRUE(nzchar(tmp$gr))) { out$grname[i] <- tmp$gr if (length(old_levels)) { levels[[i]] <- old_levels[[tmp$gr]] } else { levels[[i]] <- levels(factor(get(tmp$gr, data))) } } } out$coef <- rename(paste0("me", out$xname)) out$cor <- isTRUE(bterms$mecor) names(levels) <- out$grname levels <- levels[lengths(levels) > 0L] if (length(levels)) { levels <- levels[!duplicated(names(levels))] attr(out, "levels") <- levels } structure(out, class = c("meef_frame", "data.frame")) } empty_meef <- function() { out <- data.frame( term = character(0), xname = character(0), grname = character(0), cor = logical(0), stringsAsFactors = FALSE ) structure(out, class = c("meef_frame", "data.frame")) } is.meef_frame <- function(x) { inherits(x, "meef_frame") } # handle default of correlations between 'me' terms default_mecor <- function(mecor = NULL) { if (is.null(mecor)) TRUE else as_one_logical(mecor) } # find names of all variables used in a special effects type get_sp_vars <- function(x, type) { sp_terms <- ulapply(get_effect(x, "sp"), all_terms) all_vars(str2formula(get_matches_expr(regex_sp(type), sp_terms))) } # gather information of special effects terms # @param x either a formula or a list containing an element "sp" # @param data data frame containing the monotonic variables # @return a data.frame with one row per special term # TODO: refactor to store in long format to avoid several list columns? 
tidy_spef <- function(x, data) { if (is.formula(x)) { x <- brmsterms(x, check_response = FALSE)$dpars$mu } form <- x[["sp"]] if (!is.formula(form)) { return(empty_data_frame()) } mm <- sp_model_matrix(form, data, rename = FALSE) out <- data.frame(term = trim_wsp(colnames(mm)), stringsAsFactors = FALSE) out$coef <- rename(out$term) calls_cols <- c(paste0("calls_", all_sp_types()), "joint_call") list_cols <- c("vars_mi", "idx_mi", "idx2_mi", "ids_mo", "Imo") for (col in c(calls_cols, list_cols)) { out[[col]] <- vector("list", nrow(out)) } kmo <- 0 terms_split <- strsplit(out$term, ":") for (i in seq_rows(out)) { # prepare mo terms take_mo <- grepl_expr(regex_sp("mo"), terms_split[[i]]) if (sum(take_mo)) { out$calls_mo[[i]] <- terms_split[[i]][take_mo] nmo <- length(out$calls_mo[[i]]) out$Imo[[i]] <- (kmo + 1):(kmo + nmo) out$ids_mo[[i]] <- rep(NA, nmo) kmo <- kmo + nmo for (j in seq_along(out$calls_mo[[i]])) { mo_term <- out$calls_mo[[i]][[j]] mo_match <- get_matches_expr(regex_sp("mo"), mo_term) if (length(mo_match) > 1L || nchar(mo_match) < nchar(mo_term)) { stop2("The monotonic term '", mo_term, "' is invalid.") } out$ids_mo[[i]][j] <- eval2(mo_term)$id } } # prepare me terms take_me <- grepl_expr(regex_sp("me"), terms_split[[i]]) if (sum(take_me)) { out$calls_me[[i]] <- terms_split[[i]][take_me] # remove 'I' (identity) function calls that # were used solely to separate formula terms out$calls_me[[i]] <- gsub("^I\\(", "(", out$calls_me[[i]]) } # prepare mi terms take_mi <- grepl_expr(regex_sp("mi"), terms_split[[i]]) if (sum(take_mi)) { mi_parts <- terms_split[[i]][take_mi] out$calls_mi[[i]] <- get_matches_expr(regex_sp("mi"), mi_parts) out$vars_mi[[i]] <- out$idx_mi[[i]] <- rep(NA, length(out$calls_mi[[i]])) for (j in seq_along(out$calls_mi[[i]])) { mi_term <- eval2(out$calls_mi[[i]][[j]]) out$vars_mi[[i]][j] <- mi_term$term if (mi_term$idx != "NA") { out$idx_mi[[i]][j] <- mi_term$idx } } # do it like terms_resp to ensure correct matching out$vars_mi[[i]] <- gsub("\\.|_", "", make.names(out$vars_mi[[i]])) } has_sp_calls <- grepl_expr(regex_sp(all_sp_types()), terms_split[[i]]) sp_calls <- sub("^I\\(", "(", terms_split[[i]][has_sp_calls]) out$joint_call[[i]] <- paste0(sp_calls, collapse = " * ") out$Ic[i] <- any(!has_sp_calls) } # extract data frame to track all required index variables uni_mi <- unique(data.frame( var = unlist(out$vars_mi), idx = unlist(out$idx_mi) )) uni_mi$idx2 <- rep(NA, nrow(uni_mi)) for (i in seq_rows(uni_mi)) { uni_mi_sub <- subset2(uni_mi, var = uni_mi$var[i]) uni_mi$idx2[i] <- match(uni_mi$idx[i], na.omit(uni_mi_sub$idx)) } attr(out, "uni_mi") <- uni_mi for (i in seq_rows(out)) { for (j in seq_along(out$idx_mi[[i]])) { sub <- subset2( uni_mi, var = out$vars_mi[[i]][j], idx = out$idx_mi[[i]][j] ) out$idx2_mi[[i]][j] <- sub$idx2 } } # extract information on covariates not_one <- apply(mm, 2, function(x) any(x != 1)) out$Ic <- cumsum(out$Ic | not_one) out } # extract names of monotonic simplex parameters # @param spef output of tidy_spef # @param use_id use the 'id' argument to construct simo labels? 
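# (illustrative, not executed: for a 'spef' containing the single term
#  "mo(income)" and use_id = FALSE, the result would be c("moincome1"))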
# @return a character vector of length nrow(spef) get_simo_labels <- function(spef, use_id = FALSE) { out <- named_list(spef$term) I <- which(lengths(spef$Imo) > 0) for (i in I) { # use the ID as label if specified out[[i]] <- ifelse( use_id & !is.na(spef$ids_mo[[i]]), spef$ids_mo[[i]], paste0(spef$coef[i], seq_along(spef$Imo[[i]])) ) } unlist(out) } # standard errors of variables with missing values get_sdy <- function(x, data = NULL) { stopifnot(is.brmsterms(x)) miform <- x$adforms[["mi"]] sdy <- NULL if (is.formula(miform)) { mi <- eval_rhs(miform) if (mi$vars$sdy != "NA") { sdy <- eval2(mi$vars$sdy, data) if (!is.null(sdy) && !is.numeric(sdy)) { stop2("Measurement error should be numeric.") } if (isTRUE(any(sdy <= 0))) { stop2("Measurement error should be positive.") } } } sdy } # names of grouping variables used in measurement error terms get_me_groups <- function(x) { uni_me <- get_uni_me(x) out <- lapply(uni_me, eval2) out <- ulapply(out, "[[", "gr") out[nzchar(out)] } # get the design matrix of special effects terms # @param formula a formula containing special effects terms # @param data data.frame passed by the user # @param types types of special terms to consider # @param ... passed to get_model_matrix # @details special terms will be evaluated to 1 so that columns # containing not only ones are those with covariates # @return design matrix of special effects terms and their covariates sp_model_matrix <- function(formula, data, types = all_sp_types(), ...) { attributes(data)$terms <- NULL terms_split <- strsplit(all_terms(formula), split = ":") terms_unique <- unique(unlist(terms_split)) regex <- regex_sp(types) terms_replace <- terms_unique[grepl_expr(regex, terms_unique)] dummies <- paste0("dummy", seq_along(terms_replace), "__") data[dummies] <- list(1) terms_comb <- rep(NA, length(terms_split)) # loop over terms and add dummy variables for (i in seq_along(terms_split)) { replace_i <- grepl_expr(regex, terms_split[[i]]) terms_i_replace <- terms_split[[i]][replace_i] dummies_i <- dummies[match(terms_i_replace, terms_replace)] terms_split[[i]][replace_i] <- dummies_i terms_comb[i] <- paste0(terms_split[[i]], collapse = ":") } new_formula <- str2formula(terms_comb) attributes(new_formula) <- attributes(formula) out <- get_model_matrix(new_formula, data, ...) # recover original column names colnames(out) <- rename(colnames(out), dummies, terms_replace) out } # formula of variables used in special effects terms sp_fake_formula <- function(...) { dots <- c(...) 
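  # illustrative example (not executed): dots = c("me(x, sdx)", "mi(chl)")
  # would yield the formula ~ x + sdx + chl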
out <- vector("list", length(dots)) for (i in seq_along(dots)) { tmp <- eval2(dots[[i]]) out[[i]] <- all_vars(c(tmp$term, tmp$sdx, tmp$gr)) } str2formula(unique(unlist(out))) } # extract an me variable get_me_values <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) x <- as.vector(eval2(term$term, data)) if (!is.numeric(x)) { stop2("Noisy variables should be numeric.") } as.array(x) } # extract the measurement error of an me term get_me_noise <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) sdx <- as.vector(eval2(term$sdx, data)) if (length(sdx) == 0L) { stop2("Argument 'sdx' is missing in function 'me'.") } else if (length(sdx) == 1L) { sdx <- rep(sdx, nrow(data)) } if (!is.numeric(sdx)) { stop2("Measurement error should be numeric.") } if (isTRUE(any(sdx <= 0))) { stop2("Measurement error should be positive.") } as.array(sdx) } # extract the grouping variable of an me term get_me_group <- function(term, data) { term <- get_sp_term(term) stopifnot(is.me_term(term)) as.array(eval2(term$gr, data)) } # extract mo variables get_mo_values <- function(term, data) { term <- get_sp_term(term) stopifnot(is.mo_term(term)) x <- eval2(term$term, data) if (is.ordered(x)) { # counting starts at zero x <- as.numeric(x) - 1 } else if (all(is_wholenumber(x))) { min_value <- attr(x, "min") if (is.null(min_value)) { min_value <- min(x) } x <- x - min_value } else { stop2( "Monotonic predictors must be integers or ordered ", "factors. Error occurred for variable '", term$term, "'." ) } as.array(x) } # prepare 'sp_term' objects get_sp_term <- function(term) { if (!is.sp_term(term)) { term <- eval2(as_one_character(term)) } term } # all effects which fall under the 'sp' category of brms all_sp_types <- function() { c("mo", "me", "mi") } # classes used to set up special effects terms is.sp_term <- function(x) { inherits(x, "sp_term") } is.mo_term <- function(x) { inherits(x, "mo_term") } is.me_term <- function(x) { inherits(x, "me_term") } is.mi_term <- function(x) { inherits(x, "mi_term") } brms/R/posterior_smooths.R0000644000175000017500000001021414111751666015503 0ustar nileshnilesh#' Posterior Predictions of Smooth Terms #' #' Compute posterior predictions of smooth \code{s} and \code{t2} terms of #' models fitted with \pkg{brms}. #' #' @inheritParams posterior_epred.brmsfit #' @param smooth Name of a single smooth term for which predictions should #' be computed. #' @param newdata An optional \code{data.frame} for which to evaluate #' predictions. If \code{NULL} (default), the original data of the model is #' used. Only those variables appearing in the chosen \code{smooth} term are #' required. #' @param ... Currently ignored. #' #' @return An S x N matrix, where S is the number of #' posterior draws and N is the number of observations. #' #' @examples #' \dontrun{ #' set.seed(0) #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' summary(fit) #' #' newdata <- data.frame(x2 = seq(0, 1, 10)) #' str(posterior_smooths(fit, smooth = "s(x2)", newdata = newdata)) #' } #' #' @export posterior_smooths.brmsfit <- function(object, smooth, newdata = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, ...) 
{ resp <- validate_resp(resp, object, multiple = FALSE) bterms <- brmsterms(exclude_terms(object$formula, smooths_only = TRUE)) if (!is.null(resp)) { stopifnot(is.mvbrmsterms(bterms)) bterms <- bterms$terms[[resp]] } if (!is.null(nlpar)) { if (length(dpar)) { stop2("Cannot use 'dpar' and 'nlpar' at the same time.") } nlpar <- as_one_character(nlpar) nlpars <- names(bterms$nlpars) if (!nlpar %in% nlpars) { stop2("Invalid argument 'nlpar'. Valid non-linear ", "parameters are: ", collapse_comma(nlpars)) } bterms <- bterms$nlpars[[nlpar]] } else { dpar <- dpar %||% "mu" dpar <- as_one_character(dpar) dpars <- names(bterms$dpars) if (!dpar %in% dpars) { stop2("Invalid argument 'dpar'. Valid distributional ", "parameters are: ", collapse_comma(dpars)) } bterms <- bterms$dpars[[dpar]] } posterior_smooths( bterms, fit = object, smooth = smooth, newdata = newdata, ndraws = ndraws, draw_ids = draw_ids, ... ) } #' @export posterior_smooths.btl <- function(object, fit, smooth, newdata = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, ...) { smooth <- rm_wsp(as_one_character(smooth)) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) smef <- tidy_smef(object, fit$data) smef$term <- rm_wsp(smef$term) smterms <- unique(smef$term) if (!smooth %in% smterms) { stop2("Term '", smooth, "' cannot be found. Available ", "smooth terms are: ", collapse_comma(smterms)) } # find relevant variables sub_smef <- subset2(smef, term = smooth) covars <- all_vars(sub_smef$covars[[1]]) byvars <- all_vars(sub_smef$byvars[[1]]) req_vars <- c(covars, byvars) # prepare predictions for splines sdata <- standata( fit, newdata, re_formula = NA, internal = TRUE, check_response = FALSE, req_vars = req_vars ) draw_ids <- validate_draw_ids(fit, draw_ids, ndraws) draws <- as_draws_matrix(fit) draws <- suppressMessages(subset_draws(draws, draw = draw_ids)) prep_args <- nlist(x = object, draws, sdata, data = fit$data) prep <- do_call(prepare_predictions, prep_args) # select subset of smooth parameters and design matrices i <- which(smterms %in% smooth)[1] J <- which(smef$termnum == i) scs <- unlist(attr(prep$sm$fe$Xs, "smcols")[J]) prep$sm$fe$Xs <- prep$sm$fe$Xs[, scs, drop = FALSE] prep$sm$fe$bs <- prep$sm$fe$bs[, scs, drop = FALSE] prep$sm$re <- prep$sm$re[J] prep$family <- brmsfamily("gaussian") predictor(prep, i = NULL) } #' @export posterior_smooths.btnl <- function(object, ...) { stop2("Non-linear formulas do not contain smooth terms.") } #' @rdname posterior_smooths.brmsfit #' @export posterior_smooths <- function(object, ...) { UseMethod("posterior_smooths") } brms/R/conditional_effects.R0000644000175000017500000014016614111751665015714 0ustar nileshnilesh#' Display Conditional Effects of Predictors #' #' Display conditional effects of one or more numeric and/or categorical #' predictors including two-way interaction effects. #' #' @aliases marginal_effects marginal_effects.brmsfit #' #' @param x An object of class \code{brmsfit}. #' @param effects An optional character vector naming effects (main effects or #' interactions) for which to compute conditional plots. Interactions are #' specified by a \code{:} between variable names. If \code{NULL} (the #' default), plots are generated for all main effects and two-way interactions #' estimated in the model. When specifying \code{effects} manually, \emph{all} #' two-way interactions (including grouping variables) may be plotted #' even if not originally modeled. 
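#'   For instance, \code{effects = c("x1", "x1:x2")} selects the main effect
#'   of \code{x1} and its two-way interaction with \code{x2} (variable names
#'   chosen for illustration only).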
#' @param conditions An optional \code{data.frame} containing variable values #' to condition on. Each effect defined in \code{effects} will #' be plotted separately for each row of \code{conditions}. Values in the #' \code{cond__} column will be used as titles of the subplots. If \code{cond__} #' is not given, the row names will be used for this purpose instead. #' It is recommended to only define a few rows in order to keep the plots clear. #' See \code{\link{make_conditions}} for an easy way to define conditions. #' If \code{NULL} (the default), numeric variables will be conditionalized by #' using their means and factors will get their first level assigned. #' \code{NA} values within factors are interpreted as if all dummy #' variables of this factor are zero. This allows, for instance, to make #' predictions of the grand mean when using sum coding. #' @param int_conditions An optional named \code{list} whose elements are #' vectors of values of the variables specified in \code{effects}. #' At these values, predictions are evaluated. The names of #' \code{int_conditions} have to match the variable names exactly. #' Additionally, the elements of the vectors may be named themselves, #' in which case their names appear as labels for the conditions in the plots. #' Instead of vectors, functions returning vectors may be passed and are #' applied on the original values of the corresponding variable. #' If \code{NULL} (the default), predictions are evaluated at the #' \eqn{mean} and at \eqn{mean +/- sd} for numeric predictors and at #' all categories for factor-like predictors. #' @param re_formula A formula containing group-level effects to be considered #' in the conditional predictions. If \code{NULL}, include all group-level #' effects; if \code{NA} (default), include no group-level effects. #' @param robust If \code{TRUE} (the default) the median is used as the #' measure of central tendency. If \code{FALSE} the mean is used instead. #' @param prob A value between 0 and 1 indicating the desired probability #' to be covered by the uncertainty intervals. The default is 0.95. #' @param probs (Deprecated) The quantiles to be used in the computation of #' uncertainty intervals. Please use argument \code{prob} instead. #' @param method Method used to obtain predictions. Can be set to #' \code{"posterior_epred"} (the default), \code{"posterior_predict"}, #' or \code{"posterior_linpred"}. For more details, see the respective #' function documentations. #' @param spaghetti Logical. Indicates if predictions should #' be visualized via spaghetti plots. Only applied for numeric #' predictors. If \code{TRUE}, it is recommended #' to set argument \code{ndraws} to a relatively small value #' (e.g., \code{100}) in order to reduce computation time. #' @param surface Logical. Indicates if interactions or #' two-dimensional smooths should be visualized as a surface. #' Defaults to \code{FALSE}. The surface type can be controlled #' via argument \code{stype} of the related plotting method. #' @param categorical Logical. Indicates if effects of categorical #' or ordinal models should be shown in terms of probabilities #' of response categories. Defaults to \code{FALSE}. #' @param ordinal (Deprecated) Please use argument \code{categorical}. #' Logical. Indicates if effects in ordinal models #' should be visualized as a raster with the response categories #' on the y-axis. Defaults to \code{FALSE}. 
#' @param transform A function or a character string naming #' a function to be applied on the predicted responses #' before summary statistics are computed. Only allowed #' if \code{method = "posterior_predict"}. #' @param resolution Number of support points used to generate #' the plots. Higher resolution leads to smoother plots. #' Defaults to \code{100}. If \code{surface} is \code{TRUE}, #' this implies \code{10000} support points for interaction terms, #' so it might be necessary to reduce \code{resolution} #' when only few RAM is available. #' @param too_far Positive number. #' For surface plots only: Grid points that are too #' far away from the actual data points can be excluded from the plot. #' \code{too_far} determines what is too far. The grid is scaled into #' the unit square and then grid points more than \code{too_far} #' from the predictor variables are excluded. By default, all #' grid points are used. Ignored for non-surface plots. #' @param select_points Positive number. #' Only relevant if \code{points} or \code{rug} are set to \code{TRUE}: #' Actual data points of numeric variables that #' are too far away from the values specified in \code{conditions} #' can be excluded from the plot. Values are scaled into #' the unit interval and then points more than \code{select_points} #' from the values in \code{conditions} are excluded. #' By default, all points are used. #' @param ... Further arguments such as \code{draw_ids} or \code{ndraws} #' passed to \code{\link{posterior_predict}} or \code{\link{posterior_epred}}. #' @inheritParams plot.brmsfit #' @param ncol Number of plots to display per column for each effect. #' If \code{NULL} (default), \code{ncol} is computed internally based #' on the number of rows of \code{conditions}. #' @param points Logical. Indicates if the original data points #' should be added via \code{\link{geom_jitter}}. #' Default is \code{FALSE}. Note that only those data points will be added #' that match the specified conditions defined in \code{conditions}. #' For categorical predictors, the conditions have to match exactly. #' For numeric predictors, argument \code{select_points} is used to #' determine, which points do match a condition. #' @param rug Logical. Indicates if a rug representation of predictor #' values should be added via \code{\link{geom_rug}}. #' Default is \code{FALSE}. Depends on \code{select_points} in the same #' way as \code{points} does. #' @param mean Logical. Only relevant for spaghetti plots. #' If \code{TRUE} (the default), display the mean regression #' line on top of the regression lines for each sample. #' @param jitter_width Only used if \code{points = TRUE}: #' Amount of horizontal jittering of the data points. #' Mainly useful for ordinal models. Defaults to \code{0} that #' is no jittering. #' @param stype Indicates how surface plots should be displayed. #' Either \code{"contour"} or \code{"raster"}. #' @param line_args Only used in plots of continuous predictors: #' A named list of arguments passed to #' \code{\link{geom_smooth}}. #' @param cat_args Only used in plots of categorical predictors: #' A named list of arguments passed to #' \code{\link{geom_point}}. #' @param errorbar_args Only used in plots of categorical predictors: #' A named list of arguments passed to #' \code{\link{geom_errorbar}}. #' @param surface_args Only used in surface plots: #' A named list of arguments passed to #' \code{\link{geom_contour}} or #' \code{\link{geom_raster}} #' (depending on argument \code{stype}). 
#' @param spaghetti_args Only used in spaghetti plots: #' A named list of arguments passed to #' \code{\link{geom_smooth}}. #' @param point_args Only used if \code{points = TRUE}: #' A named list of arguments passed to #' \code{\link{geom_jitter}}. #' @param rug_args Only used if \code{rug = TRUE}: #' A named list of arguments passed to #' \code{\link{geom_rug}}. #' @param facet_args Only used if if multiple condtions are provided: #' A named list of arguments passed to #' \code{\link{facet_wrap}}. #' #' @return An object of class \code{'brms_conditional_effects'} which is a #' named list with one data.frame per effect containing all information #' required to generate conditional effects plots. Among others, these #' data.frames contain some special variables, namely \code{estimate__} #' (predicted values of the response), \code{se__} (standard error of the #' predicted response), \code{lower__} and \code{upper__} (lower and upper #' bounds of the uncertainty interval of the response), as well as #' \code{cond__} (used in faceting when \code{conditions} contains multiple #' rows). #' #' The corresponding \code{plot} method returns a named #' list of \code{\link{ggplot}} objects, which can be further #' customized using the \pkg{ggplot2} package. #' #' @details When creating \code{conditional_effects} for a particular predictor #' (or interaction of two predictors), one has to choose the values of all #' other predictors to condition on. By default, the mean is used for #' continuous variables and the reference category is used for factors, but #' you may change these values via argument \code{conditions}. This also has #' an implication for the \code{points} argument: In the created plots, only #' those points will be shown that correspond to the factor levels actually #' used in the conditioning, in order not to create the false impression of #' bad model fit, where it is just due to conditioning on certain factor #' levels. #' #' To fully change colors of the created plots, one has to amend both #' \code{scale_colour} and \code{scale_fill}. See #' \code{\link{scale_colour_grey}} or \code{\link{scale_colour_gradient}} for #' more details. 
#' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1 | patient), #' data = epilepsy, family = poisson()) #' #' ## plot all conditional effects #' plot(conditional_effects(fit), ask = FALSE) #' #' ## change colours to grey scale #' library(ggplot2) #' me <- conditional_effects(fit, "zBase:Trt") #' plot(me, plot = FALSE)[[1]] + #' scale_color_grey() + #' scale_fill_grey() #' #' ## only plot the conditional interaction effect of 'zBase:Trt' #' ## for different values for 'zAge' #' conditions <- data.frame(zAge = c(-1, 0, 1)) #' plot(conditional_effects(fit, effects = "zBase:Trt", #' conditions = conditions)) #' #' ## also incorporate group-level effects variance over patients #' ## also add data points and a rug representation of predictor values #' plot(conditional_effects(fit, effects = "zBase:Trt", #' conditions = conditions, re_formula = NULL), #' points = TRUE, rug = TRUE) #' #' ## change handling of two-way interactions #' int_conditions <- list( #' zBase = setNames(c(-2, 1, 0), c("b", "c", "a")) #' ) #' conditional_effects(fit, effects = "Trt:zBase", #' int_conditions = int_conditions) #' conditional_effects(fit, effects = "Trt:zBase", #' int_conditions = list(zBase = quantile)) #' #' ## fit a model to illustrate how to plot 3-way interactions #' fit3way <- brm(count ~ zAge * zBase * Trt, data = epilepsy) #' conditions <- make_conditions(fit3way, "zAge") #' conditional_effects(fit3way, "zBase:Trt", conditions = conditions) #' ## only include points close to the specified values of zAge #' me <- conditional_effects( #' fit3way, "zBase:Trt", conditions = conditions, #' select_points = 0.1 #' ) #' plot(me, points = TRUE) #' } #' #' @export conditional_effects.brmsfit <- function(x, effects = NULL, conditions = NULL, int_conditions = NULL, re_formula = NA, prob = 0.95, robust = TRUE, method = "posterior_epred", spaghetti = FALSE, surface = FALSE, categorical = FALSE, ordinal = FALSE, transform = NULL, resolution = 100, select_points = 0, too_far = 0, probs = NULL, ...) { probs <- validate_ci_bounds(prob, probs = probs) method <- validate_pp_method(method) spaghetti <- as_one_logical(spaghetti) surface <- as_one_logical(surface) categorical <- as_one_logical(categorical) ordinal <- as_one_logical(ordinal) contains_draws(x) x <- restructure(x) new_formula <- update_re_terms(x$formula, re_formula = re_formula) bterms <- brmsterms(new_formula) if (!is.null(transform) && method != "posterior_predict") { stop2("'transform' is only allowed if 'method = posterior_predict'.") } if (ordinal) { warning2("Argument 'ordinal' is deprecated. 
", "Please use 'categorical' instead.") } rsv_vars <- rsv_vars(bterms) use_def_effects <- is.null(effects) if (use_def_effects) { effects <- get_all_effects(bterms, rsv_vars = rsv_vars) } else { # allow to define interactions in any order effects <- strsplit(as.character(effects), split = ":") if (any(unique(unlist(effects)) %in% rsv_vars)) { stop2("Variables ", collapse_comma(rsv_vars), " should not be used as effects for this model") } if (any(lengths(effects) > 2L)) { stop2("To display interactions of order higher than 2 ", "please use the 'conditions' argument.") } all_effects <- get_all_effects( bterms, rsv_vars = rsv_vars, comb_all = TRUE ) ae_coll <- all_effects[lengths(all_effects) == 1L] ae_coll <- ulapply(ae_coll, paste, collapse = ":") matches <- match(lapply(all_effects, sort), lapply(effects, sort), 0L) if (sum(matches) > 0 && sum(matches > 0) < length(effects)) { invalid <- effects[setdiff(seq_along(effects), sort(matches))] invalid <- ulapply(invalid, paste, collapse = ":") warning2( "Some specified effects are invalid for this model: ", collapse_comma(invalid), "\nValid effects are ", "(combinations of): ", collapse_comma(ae_coll) ) } effects <- unique(effects[sort(matches)]) if (!length(effects)) { stop2( "All specified effects are invalid for this model.\n", "Valid effects are (combinations of): ", collapse_comma(ae_coll) ) } } if (categorical || ordinal) { int_effs <- lengths(effects) == 2L if (any(int_effs)) { effects <- effects[!int_effs] warning2( "Interactions cannot be plotted directly if 'categorical' ", "is TRUE. Please use argument 'conditions' instead." ) } } if (!length(effects)) { stop2("No valid effects detected.") } mf <- model.frame(x) conditions <- prepare_conditions( x, conditions = conditions, effects = effects, re_formula = re_formula, rsv_vars = rsv_vars ) int_conditions <- lapply(int_conditions, function(x) if (is.numeric(x)) sort(x, TRUE) else x ) int_vars <- get_int_vars(bterms) group_vars <- get_group_vars(bterms) out <- list() for (i in seq_along(effects)) { eff <- effects[[i]] cond_data <- prepare_cond_data( mf[, eff, drop = FALSE], conditions = conditions, int_conditions = int_conditions, int_vars = int_vars, group_vars = group_vars, surface = surface, resolution = resolution, reorder = use_def_effects ) if (surface && length(eff) == 2L && too_far > 0) { # exclude prediction grid points too far from data ex_too_far <- mgcv::exclude.too.far( g1 = cond_data[[eff[1]]], g2 = cond_data[[eff[2]]], d1 = mf[, eff[1]], d2 = mf[, eff[2]], dist = too_far) cond_data <- cond_data[!ex_too_far, ] } c(out) <- conditional_effects( bterms, fit = x, cond_data = cond_data, method = method, surface = surface, spaghetti = spaghetti, categorical = categorical, ordinal = ordinal, re_formula = re_formula, transform = transform, conditions = conditions, int_conditions = int_conditions, select_points = select_points, probs = probs, robust = robust, ... ) } structure(out, class = "brms_conditional_effects") } #' @rdname conditional_effects.brmsfit #' @export conditional_effects <- function(x, ...) { UseMethod("conditional_effects") } # compute expected values of MV models for use in conditional_effects # @return a list of summarized prediction matrices #' @export conditional_effects.mvbrmsterms <- function(x, resp = NULL, ...) { resp <- validate_resp(resp, x$responses) x$terms <- x$terms[resp] out <- lapply(x$terms, conditional_effects, ...) 
unlist(out, recursive = FALSE) } # conditional_effects for univariate model # @return a list with the summarized prediction matrix as the only element # @note argument 'resp' exists only to be excluded from '...' (#589) #' @export conditional_effects.brmsterms <- function( x, fit, cond_data, int_conditions, method, surface, spaghetti, categorical, ordinal, probs, robust, dpar = NULL, nlpar = NULL, resp = NULL, ... ) { stopifnot(is.brmsfit(fit)) effects <- attr(cond_data, "effects") types <- attr(cond_data, "types") catscale <- NULL pred_args <- list( fit, newdata = cond_data, allow_new_levels = TRUE, dpar = dpar, nlpar = nlpar, resp = if (nzchar(x$resp)) x$resp, incl_autocor = FALSE, ... ) if (method != "posterior_predict") { # 'transform' creates problems in 'posterior_linpred' pred_args$transform <- NULL } out <- do_call(method, pred_args) rownames(cond_data) <- NULL if (categorical || ordinal) { if (method != "posterior_epred") { stop2("Can only use 'categorical' with method = 'posterior_epred'.") } if (!is_polytomous(x)) { stop2("Argument 'categorical' may only be used ", "for categorical or ordinal models.") } if (categorical && ordinal) { stop2("Please use argument 'categorical' instead of 'ordinal'.") } catscale <- str_if(is_multinomial(x), "Count", "Probability") cats <- dimnames(out)[[3]] if (is.null(cats)) cats <- seq_dim(out, 3) cond_data <- repl(cond_data, length(cats)) cond_data <- do_call(rbind, cond_data) cond_data$cats__ <- factor(rep(cats, each = ncol(out)), levels = cats) effects[2] <- "cats__" types[2] <- "factor" } else { if (conv_cats_dpars(x$family) && is.null(dpar)) { stop2("Please set 'categorical' to TRUE.") } if (is_ordinal(x$family) && is.null(dpar) && method != "posterior_linpred") { warning2( "Predictions are treated as continuous variables in ", "'conditional_effects' by default which is likely invalid ", "for ordinal families. Please set 'categorical' to TRUE." 
) if (method == "posterior_epred") { out <- ordinal_probs_continuous(out) } } } cond_data <- add_effects__(cond_data, effects) first_numeric <- types[1] %in% "numeric" second_numeric <- types[2] %in% "numeric" both_numeric <- first_numeric && second_numeric if (second_numeric && !surface) { # only convert 'effect2__' to factor so that the original # second effect variable remains unchanged in the data mde2 <- round(cond_data[[effects[2]]], 2) levels2 <- sort(unique(mde2), TRUE) cond_data$effect2__ <- factor(mde2, levels = levels2) labels2 <- names(int_conditions[[effects[2]]]) if (length(labels2) == length(levels2)) { levels(cond_data$effect2__) <- labels2 } } spag <- NULL if (first_numeric && spaghetti) { if (surface) { stop2("Cannot use 'spaghetti' and 'surface' at the same time.") } spag <- out if (categorical) { spag <- do_call(cbind, array2list(spag)) } sample <- rep(seq_rows(spag), each = ncol(spag)) if (length(types) == 2L) { # draws should be unique across plotting groups sample <- paste0(sample, "_", cond_data[[effects[2]]]) } spag <- data.frame(as.numeric(t(spag)), factor(sample)) colnames(spag) <- c("estimate__", "sample__") spag <- cbind(cond_data, spag) } out <- posterior_summary(out, probs = probs, robust = robust) if (categorical || ordinal) { out <- do_call(rbind, array2list(out)) } colnames(out) <- c("estimate__", "se__", "lower__", "upper__") out <- cbind(cond_data, out) if (!is.null(dpar)) { response <- dpar } else if (!is.null(nlpar)) { response <- nlpar } else { response <- as.character(x$formula[2]) } attr(out, "effects") <- effects attr(out, "response") <- response attr(out, "surface") <- unname(both_numeric && surface) attr(out, "categorical") <- categorical attr(out, "catscale") <- catscale attr(out, "ordinal") <- ordinal attr(out, "spaghetti") <- spag attr(out, "points") <- make_point_frame(x, fit$data, effects, ...) name <- paste0(usc(x$resp, "suffix"), paste0(effects, collapse = ":")) setNames(list(out), name) } # get combinations of variables used in predictor terms # @param ... character vectors or formulas # @param alist a list of character vectors or formulas get_var_combs <- function(..., alist = list()) { dots <- c(list(...), alist) for (i in seq_along(dots)) { if (is.formula(dots[[i]])) { dots[[i]] <- attr(terms(dots[[i]]), "term.labels") } dots[[i]] <- lapply(dots[[i]], all_vars) } unique(unlist(dots, recursive = FALSE)) } # extract combinations of predictor variables get_all_effects <- function(x, ...) { UseMethod("get_all_effects") } #' @export get_all_effects.default <- function(x, ...) { NULL } #' @export get_all_effects.mvbrmsterms <- function(x, ...) { out <- lapply(x$terms, get_all_effects, ...) unique(unlist(out, recursive = FALSE)) } # get all effects for use in conditional_effects # @param bterms object of class brmsterms # @param rsv_vars character vector of reserved variables # @param comb_all include all main effects and two-way interactions? 
# @return a list with one element per valid effect / effects combination # excludes all 3-way or higher interactions #' @export get_all_effects.brmsterms <- function(x, rsv_vars = NULL, comb_all = FALSE) { stopifnot(is.atomic(rsv_vars)) out <- list() for (dp in names(x$dpars)) { out <- c(out, get_all_effects(x$dpars[[dp]])) } for (nlp in names(x$nlpars)) { out <- c(out, get_all_effects(x$nlpars[[nlp]])) } out <- rmNULL(lapply(out, setdiff, y = rsv_vars)) if (comb_all) { # allow to combine all variables with each other out <- unique(unlist(out)) out <- c(out, get_group_vars(x)) if (length(out)) { int <- expand.grid(out, out, stringsAsFactors = FALSE) int <- int[int[, 1] != int[, 2], ] int <- as.list(as.data.frame(t(int), stringsAsFactors = FALSE)) int <- unique(unname(lapply(int, sort))) out <- c(as.list(out), int) } } unique(out[lengths(out) <= 2L]) } #' @export get_all_effects.btl <- function(x, ...) { c(get_var_combs(x[["fe"]], x[["cs"]]), get_all_effects_type(x, "sp"), get_all_effects_type(x, "sm"), get_all_effects_type(x, "gp")) } # extract variable combinations from special terms get_all_effects_type <- function(x, type) { stopifnot(is.btl(x)) type <- as_one_character(type) regex_type <- regex_sp(type) terms <- all_terms(x[[type]]) out <- named_list(terms) for (i in seq_along(terms)) { # some special terms can appear within interactions # we did not allow ":" within these terms so we can use it for splitting term_parts <- unlist(strsplit(terms[i], split = ":")) vars <- vector("list", length(term_parts)) for (j in seq_along(term_parts)) { if (grepl_expr(regex_type, term_parts[j])) { # evaluate a special term to extract variables tmp <- eval2(term_parts[j]) vars[[j]] <- setdiff(unique(c(tmp$term, tmp$by)), "NA") } else { # extract all variables from an ordinary term vars[[j]] <- all_vars(term_parts[j]) } } vars <- unique(unlist(vars)) out[[i]] <- str2formula(vars, collapse = "*") } get_var_combs(alist = out) } #' @export get_all_effects.btnl <- function(x, ...) { covars <- all_vars(rhs(x$covars)) out <- as.list(covars) if (length(covars) > 1L) { c(out) <- utils::combn(covars, 2, simplify = FALSE) } unique(out) } # extract names of predictor variables get_pred_vars <- function(x) { unique(unlist(get_all_effects(x))) } # extract names of variables treated as integers get_int_vars <- function(x, ...) { UseMethod("get_int_vars") } #' @export get_int_vars.mvbrmsterms <- function(x, ...) { unique(ulapply(x$terms, get_int_vars)) } #' @export get_int_vars.brmsterms <- function(x, ...) { advars <- ulapply(rmNULL(x$adforms[c("trials", "thres", "vint")]), all_vars) unique(c(advars, get_sp_vars(x, "mo"))) } # transform posterior draws of ordinal probabilities to a # continuous scale assuming equidistance between adjacent categories # @param x an ndraws x nobs x ncat array of posterior draws # @return an ndraws x nobs matrix of posterior draws ordinal_probs_continuous <- function(x) { stopifnot(length(dim(x)) == 3) for (k in seq_dim(x, 3)) { x[, , k] <- x[, , k] * k } x <- lapply(seq_dim(x, 2), function(s) rowSums(x[, s, ])) do_call(cbind, x) } #' Prepare Fully Crossed Conditions #' #' This is a helper function to prepare fully crossed conditions primarily #' for use with the \code{conditions} argument of \code{\link{conditional_effects}}. #' Automatically creates labels for each row in the \code{cond__} column. #' #' @param x An \R object from which to extract the variables #' that should be part of the conditions. #' @param vars Names of the variables that should be part of the conditions. 
#' @param ... Arguments passed to \code{\link{rows2labels}}. #' #' @return A \code{data.frame} where each row indicates a condition. #' #' @details For factor like variables, all levels are used as conditions. #' For numeric variables, \code{mean + (-1:1) * SD} are used as conditions. #' #' @seealso \code{\link{conditional_effects}}, \code{\link{rows2labels}} #' #' @examples #' df <- data.frame(x = c("a", "b"), y = rnorm(10)) #' make_conditions(df, vars = c("x", "y")) #' #' @export make_conditions <- function(x, vars, ...) { # rev ensures that the last variable varies fastest in expand.grid vars <- rev(as.character(vars)) if (!is.data.frame(x) && "data" %in% names(x)) { x <- x$data } x <- as.data.frame(x) out <- named_list(vars) for (v in vars) { tmp <- get(v, x) if (is_like_factor(tmp)) { tmp <- levels(as.factor(tmp)) } else { tmp <- mean(tmp, na.rm = TRUE) + (-1:1) * sd(tmp, na.rm = TRUE) } out[[v]] <- tmp } out <- rev(expand.grid(out)) out$cond__ <- rows2labels(out, ...) out } # extract the cond__ variable used for faceting get_cond__ <- function(x) { out <- x[["cond__"]] if (is.null(out)) { out <- rownames(x) } as.character(out) } #' Convert Rows to Labels #' #' Convert information in rows to labels for each row. #' #' @param x A \code{data.frame} for which to extract labels. #' @param digits Minimal number of decimal places shown in #' the labels of numeric variables. #' @param sep A single character string defining the separator #' between variables used in the labels. #' @param incl_vars Indicates if variable names should #' be part of the labels. Defaults to \code{TRUE}. #' @param ... Currently unused. #' #' @return A character vector of the same length as the number #' of rows of \code{x}. #' #' @seealso \code{\link{make_conditions}}, \code{\link{conditional_effects}} #' #' @export rows2labels <- function(x, digits = 2, sep = " & ", incl_vars = TRUE, ...) { x <- as.data.frame(x) incl_vars <- as_one_logical(incl_vars) out <- x for (i in seq_along(out)) { if (!is_like_factor(out[[i]])) { out[[i]] <- round(out[[i]], digits) } if (incl_vars) { out[[i]] <- paste0(names(out)[i], " = ", out[[i]]) } } paste_sep <- function(..., sep__ = sep) { paste(..., sep = sep__) } Reduce(paste_sep, out) } # prepare conditions for use in conditional_effects # @param fit an object of class 'brmsfit' # @param conditions optional data.frame containing user defined conditions # @param effects see conditional_effects # @param re_formula see conditional_effects # @param rsv_vars names of reserved variables # @return a data.frame with (possibly updated) conditions prepare_conditions <- function(fit, conditions = NULL, effects = NULL, re_formula = NA, rsv_vars = NULL) { mf <- model.frame(fit) new_formula <- update_re_terms(fit$formula, re_formula = re_formula) bterms <- brmsterms(new_formula) if (any(grepl_expr("^(as\\.)?factor(.+)$", bterms$allvars))) { # conditions are chosen based the variables stored in the data # this approach cannot take into account possible transformations # to factors happening inside the model formula warning2( "Using 'factor' or 'as.factor' in the model formula ", "might lead to problems in 'conditional_effects'.", "Please convert your variables to factors beforehand." 
) } req_vars <- all_vars(rhs(bterms$allvars)) req_vars <- setdiff(req_vars, rsv_vars) req_vars <- setdiff(req_vars, names(fit$data2)) if (is.null(conditions)) { conditions <- as.data.frame(as.list(rep(NA, length(req_vars)))) names(conditions) <- req_vars } else { conditions <- as.data.frame(conditions) if (!nrow(conditions)) { stop2("Argument 'conditions' must have a least one row.") } conditions <- unique(conditions) if (any(duplicated(get_cond__(conditions)))) { stop2("Condition labels should be unique.") } req_vars <- setdiff(req_vars, names(conditions)) } # special treatment for 'trials' addition variables trial_vars <- all_vars(bterms$adforms$trials) trial_vars <- trial_vars[!vars_specified(trial_vars, conditions)] if (length(trial_vars)) { message("Setting all 'trials' variables to 1 by ", "default if not specified otherwise.") req_vars <- setdiff(req_vars, trial_vars) for (v in trial_vars) { conditions[[v]] <- 1L } } # use sensible default values for unspecified variables subset_vars <- get_ad_vars(bterms, "subset") int_vars <- get_int_vars(bterms) group_vars <- get_group_vars(bterms) req_vars <- setdiff(req_vars, group_vars) for (v in req_vars) { if (is_like_factor(mf[[v]])) { # factor-like variable if (v %in% subset_vars) { # avoid unintentional subsetting of newdata (#755) conditions[[v]] <- TRUE } else { # use reference category for factors levels <- levels(as.factor(mf[[v]])) ordered <- is.ordered(mf[[v]]) conditions[[v]] <- factor(levels[1], levels, ordered = ordered) } } else { # numeric-like variable if (v %in% subset_vars) { # avoid unintentional subsetting of newdata (#755) conditions[[v]] <- 1 } else if (v %in% int_vars) { # ensure valid integer values conditions[[v]] <- round(median(mf[[v]], na.rm = TRUE)) } else { conditions[[v]] <- mean(mf[[v]], na.rm = TRUE) } } } all_vars <- c(all_vars(bterms$allvars), "cond__") unused_vars <- setdiff(names(conditions), all_vars) if (length(unused_vars)) { warning2( "The following variables in 'conditions' are not ", "part of the model:\n", collapse_comma(unused_vars) ) } cond__ <- conditions$cond__ conditions <- validate_newdata( conditions, fit, re_formula = re_formula, allow_new_levels = TRUE, check_response = FALSE, incl_autocor = FALSE ) conditions$cond__ <- cond__ conditions } # prepare data to be used in conditional_effects # @param data data.frame containing only data of the predictors of interest # @param conditions see argument 'conditions' of conditional_effects # @param int_conditions see argument 'int_conditions' of conditional_effects # @param int_vars names of variables being treated as integers # @param group_vars names of grouping variables # @param surface generate surface plots later on? # @param resolution number of distinct points at which to evaluate # the predictors of interest # @param reorder reorder predictors so that numeric ones come first? 
prepare_cond_data <- function(data, conditions, int_conditions = NULL, int_vars = NULL, group_vars = NULL, surface = FALSE, resolution = 100, reorder = TRUE) { effects <- names(data) stopifnot(length(effects) %in% c(1L, 2L)) is_factor <- ulapply(data, is_like_factor) | names(data) %in% group_vars types <- ifelse(is_factor, "factor", "numeric") # numeric effects should come first if (reorder) { new_order <- order(types, decreasing = TRUE) effects <- effects[new_order] types <- types[new_order] } # handle first predictor if (effects[1] %in% names(int_conditions)) { # first predictor has pre-specified conditions int_cond <- int_conditions[[effects[1]]] if (is.function(int_cond)) { int_cond <- int_cond(data[[effects[1]]]) } values <- int_cond } else if (types[1] == "factor") { # first predictor is factor-like values <- factor(unique(data[[effects[1]]])) } else { # first predictor is numeric min1 <- min(data[[effects[1]]], na.rm = TRUE) max1 <- max(data[[effects[1]]], na.rm = TRUE) if (effects[1] %in% int_vars) { values <- seq(min1, max1, by = 1) } else { values <- seq(min1, max1, length.out = resolution) } } if (length(effects) == 2L) { # handle second predictor values <- setNames(list(values, NA), effects) if (effects[2] %in% names(int_conditions)) { # second predictor has pre-specified conditions int_cond <- int_conditions[[effects[2]]] if (is.function(int_cond)) { int_cond <- int_cond(data[[effects[2]]]) } values[[2]] <- int_cond } else if (types[2] == "factor") { # second predictor is factor-like values[[2]] <- factor(unique(data[[effects[2]]])) } else { # second predictor is numeric if (surface) { min2 <- min(data[[effects[2]]], na.rm = TRUE) max2 <- max(data[[effects[2]]], na.rm = TRUE) if (effects[2] %in% int_vars) { values[[2]] <- seq(min2, max2, by = 1) } else { values[[2]] <- seq(min2, max2, length.out = resolution) } } else { if (effects[2] %in% int_vars) { median2 <- median(data[[effects[2]]]) mad2 <- mad(data[[effects[2]]]) values[[2]] <- round((-1:1) * mad2 + median2) } else { mean2 <- mean(data[[effects[2]]], na.rm = TRUE) sd2 <- sd(data[[effects[2]]], na.rm = TRUE) values[[2]] <- (-1:1) * sd2 + mean2 } } } data <- do_call(expand.grid, values) } else { stopifnot(length(effects) == 1L) data <- structure(data.frame(values), names = effects) } # no need to have the same value combination more than once data <- unique(data) data <- data[do_call(order, as.list(data)), , drop = FALSE] data <- replicate(nrow(conditions), data, simplify = FALSE) cond_vars <- setdiff(names(conditions), effects) cond__ <- get_cond__(conditions) for (j in seq_rows(conditions)) { data[[j]] <- fill_newdata(data[[j]], cond_vars, conditions, n = j) data[[j]]$cond__ <- cond__[j] } data <- do_call(rbind, data) data$cond__ <- factor(data$cond__, cond__) structure(data, effects = effects, types = types) } # which variables in 'vars' are specified in 'data'? vars_specified <- function(vars, data) { .fun <- function(v) isTRUE(v %in% names(data)) && any(!is.na(data[[v]])) as.logical(ulapply(vars, .fun)) } # prepare data points based on the provided conditions # allows to add data points to conditional effects plots # @return a data.frame containing the data points to be plotted make_point_frame <- function(bterms, mf, effects, conditions, select_points = 0, transform = NULL, ...) 
{ stopifnot(is.brmsterms(bterms), is.data.frame(mf)) effects <- intersect(effects, names(mf)) points <- mf[, effects, drop = FALSE] points$resp__ <- model.response( model.frame(bterms$respform, mf, na.action = na.pass) ) req_vars <- names(mf) groups <- get_re_groups(bterms) if (length(groups)) { c(req_vars) <- unlist(strsplit(groups, ":")) } req_vars <- unique(setdiff(req_vars, effects)) req_vars <- intersect(req_vars, names(conditions)) if (length(req_vars)) { # find out which data point is valid for which condition cond__ <- get_cond__(conditions) mf <- mf[, req_vars, drop = FALSE] conditions <- conditions[, req_vars, drop = FALSE] points$cond__ <- NA points <- replicate(nrow(conditions), points, simplify = FALSE) for (i in seq_along(points)) { cond <- conditions[i, , drop = FALSE] # ensures correct handling of matrix columns not_na <- function(x) !any(is.na(x) | x %in% "zero__") not_na <- ulapply(cond, not_na) cond <- cond[, not_na, drop = FALSE] mf_tmp <- mf[, not_na, drop = FALSE] if (ncol(mf_tmp)) { is_num <- sapply(mf_tmp, is.numeric) is_num <- is_num & !names(mf_tmp) %in% groups if (sum(is_num)) { # handle numeric variables stopifnot(select_points >= 0) if (select_points > 0) { for (v in names(mf_tmp)[is_num]) { min <- min(mf_tmp[, v], na.rm = TRUE) max <- max(mf_tmp[, v], na.rm = TRUE) unit <- scale_unit(mf_tmp[, v], min, max) unit_cond <- scale_unit(cond[, v], min, max) unit_diff <- abs(unit - unit_cond) close_enough <- unit_diff <= select_points mf_tmp[[v]][close_enough] <- cond[, v] mf_tmp[[v]][!close_enough] <- NA } } else { # take all numeric values if select_points is zero cond <- cond[, !is_num, drop = FALSE] mf_tmp <- mf_tmp[, !is_num, drop = FALSE] } } } if (ncol(mf_tmp)) { # handle factors and grouping variables # do it like base::duplicated K <- do_call("paste", c(mf_tmp, sep = "\r")) %in% do_call("paste", c(cond, sep = "\r")) } else { K <- seq_rows(mf) } # cond__ allows to assign points to conditions points[[i]]$cond__[K] <- cond__[i] } points <- do_call(rbind, points) points <- points[!is.na(points$cond__), , drop = FALSE] points$cond__ <- factor(points$cond__, cond__) } points <- add_effects__(points, effects) if (!is.numeric(points$resp__)) { points$resp__ <- as.numeric(as.factor(points$resp__)) if (is_binary(bterms$family)) { points$resp__ <- points$resp__ - 1 } } if (!is.null(transform)) { points$resp__ <- do_call(transform, list(points$resp__)) } points } # add effect__ variables to the data add_effects__ <- function(data, effects) { for (i in seq_along(effects)) { data[[paste0("effect", i, "__")]] <- eval2(effects[i], data) } data } #' @export print.brms_conditional_effects <- function(x, ...) { plot(x, ...) } #' @rdname conditional_effects.brmsfit #' @method plot brms_conditional_effects #' @export plot.brms_conditional_effects <- function( x, ncol = NULL, points = FALSE, rug = FALSE, mean = TRUE, jitter_width = 0, stype = c("contour", "raster"), line_args = list(), cat_args = list(), errorbar_args = list(), surface_args = list(), spaghetti_args = list(), point_args = list(), rug_args = list(), facet_args = list(), theme = NULL, ask = TRUE, plot = TRUE, ... ) { dots <- list(...) plot <- use_alias(plot, dots$do_plot) stype <- match.arg(stype) smooths_only <- isTRUE(attr(x, "smooths_only")) if (points && smooths_only) { stop2("Argument 'points' is invalid for objects ", "returned by 'conditional_smooths'.") } if (!is_equal(jitter_width, 0)) { warning2("'jitter_width' is deprecated. 
Please use ", "'point_args = list(width = )' instead.") } if (!is.null(theme) && !is.theme(theme)) { stop2("Argument 'theme' should be a 'theme' object.") } if (plot) { default_ask <- devAskNewPage() on.exit(devAskNewPage(default_ask)) devAskNewPage(ask = FALSE) } dont_replace <- c("mapping", "data", "inherit.aes") plots <- named_list(names(x)) for (i in seq_along(x)) { response <- attr(x[[i]], "response") effects <- attr(x[[i]], "effects") ncond <- length(unique(x[[i]]$cond__)) df_points <- attr(x[[i]], "points") categorical <- isTRUE(attr(x[[i]], "categorical")) catscale <- attr(x[[i]], "catscale") surface <- isTRUE(attr(x[[i]], "surface")) # deprecated as of brms 2.4.3 ordinal <- isTRUE(attr(x[[i]], "ordinal")) if (surface || ordinal) { # surface plots for two dimensional interactions or ordinal plots plots[[i]] <- ggplot(x[[i]]) + aes_(~ effect1__, ~ effect2__) + labs(x = effects[1], y = effects[2]) if (ordinal) { width <- ifelse(is_like_factor(x[[i]]$effect1__), 0.9, 1) .surface_args <- nlist( mapping = aes_(fill = ~ estimate__), height = 0.9, width = width ) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_tile, .surface_args) + scale_fill_gradientn(colors = viridis6(), name = catscale) + ylab(response) } else if (stype == "contour") { .surface_args <- nlist( mapping = aes_(z = ~ estimate__, colour = ~ ..level..), bins = 30, size = 1.3 ) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_contour, .surface_args) + scale_color_gradientn(colors = viridis6(), name = response) } else if (stype == "raster") { .surface_args <- nlist(mapping = aes_(fill = ~ estimate__)) replace_args(.surface_args, dont_replace) <- surface_args plots[[i]] <- plots[[i]] + do_call(geom_raster, .surface_args) + scale_fill_gradientn(colors = viridis6(), name = response) } } else { # plot effects of single predictors or two-way interactions gvar <- if (length(effects) == 2L) "effect2__" spaghetti <- attr(x[[i]], "spaghetti") plots[[i]] <- ggplot(x[[i]]) + aes_string(x = "effect1__", y = "estimate__", colour = gvar) + labs(x = effects[1], y = response, colour = effects[2]) if (is.null(spaghetti)) { plots[[i]] <- plots[[i]] + aes_string(ymin = "lower__", ymax = "upper__", fill = gvar) + labs(fill = effects[2]) } # extract suggested colors for later use colors <- ggplot_build(plots[[i]]) colors <- unique(colors$data[[1]][["colour"]]) if (points && !categorical && !surface) { # add points first so that they appear behind the predictions .point_args <- list( mapping = aes_string(x = "effect1__", y = "resp__"), data = df_points, inherit.aes = FALSE, size = 2 / ncond^0.25, height = 0, width = jitter_width ) if (is_like_factor(df_points[, gvar])) { .point_args$mapping[c("colour", "fill")] <- aes_string(colour = gvar, fill = gvar) } replace_args(.point_args, dont_replace) <- point_args plots[[i]] <- plots[[i]] + do_call(geom_jitter, .point_args) } if (!is.null(spaghetti)) { # add a regression line for each sample separately .spaghetti_args <- list( aes_string(group = "sample__", colour = gvar), data = spaghetti, stat = "identity", size = 0.5 ) if (length(effects) == 1L) { .spaghetti_args$colour <- alpha("blue", 0.1) } else { # workaround to get transparent lines plots[[i]] <- plots[[i]] + scale_color_manual(values = alpha(colors, 0.1)) } replace_args(.spaghetti_args, dont_replace) <- spaghetti_args plots[[i]] <- plots[[i]] + do_call(geom_smooth, .spaghetti_args) } if (is.numeric(x[[i]]$effect1__)) { # line plots for numeric 
predictors .line_args <- list(stat = "identity") if (!is.null(spaghetti)) { # display a white mean regression line .line_args$mapping <- aes_string(group = gvar) .line_args$colour <- alpha("white", 0.8) } replace_args(.line_args, dont_replace) <- line_args if (mean || is.null(spaghetti)) { plots[[i]] <- plots[[i]] + do_call(geom_smooth, .line_args) } if (rug) { .rug_args <- list( aes_string(x = "effect1__"), sides = "b", data = df_points, inherit.aes = FALSE ) if (is_like_factor(df_points[, gvar])) { .point_args$mapping[c("colour", "fill")] <- aes_string(colour = gvar, fill = gvar) } replace_args(.rug_args, dont_replace) <- rug_args plots[[i]] <- plots[[i]] + do_call(geom_rug, .rug_args) } } else { # points and errorbars for factors .cat_args <- list( position = position_dodge(width = 0.4), size = 4 / ncond^0.25 ) .errorbar_args <- list( position = position_dodge(width = 0.4), width = 0.3 ) replace_args(.cat_args, dont_replace) <- cat_args replace_args(.errorbar_args, dont_replace) <- errorbar_args plots[[i]] <- plots[[i]] + do_call(geom_point, .cat_args) + do_call(geom_errorbar, .errorbar_args) } if (categorical) { plots[[i]] <- plots[[i]] + ylab(catscale) + labs(fill = response, color = response) } } if (ncond > 1L) { # one plot per row of conditions if (is.null(ncol)) { ncol <- max(floor(sqrt(ncond)), 3) } .facet_args <- nlist(facets = "cond__", ncol) replace_args(.facet_args, dont_replace) <- facet_args plots[[i]] <- plots[[i]] + do_call(facet_wrap, .facet_args) } plots[[i]] <- plots[[i]] + theme if (plot) { plot(plots[[i]]) if (i == 1) { devAskNewPage(ask = ask) } } } invisible(plots) } # the name 'marginal_effects' is deprecated as of brms 2.10.3 # do not remove it eventually as it has been used in the brms papers #' @export marginal_effects <- function(x, ...) { UseMethod("marginal_effects") } #' @export marginal_effects.brmsfit <- function(x, ...) { warning2("Method 'marginal_effects' is deprecated. ", "Please use 'conditional_effects' instead.") conditional_effects.brmsfit(x, ...) } #' @export print.brmsMarginalEffects <- function(x, ...) { class(x) <- "brms_conditional_effects" print(x, ...) } #' @export plot.brmsMarginalEffects <- function(x, ...) { class(x) <- "brms_conditional_effects" plot(x, ...) } brms/R/posterior_epred.R0000644000175000017500000007172114111751666015120 0ustar nileshnilesh#' Expected Values of the Posterior Predictive Distribution #' #' Compute posterior draws of the expected value/mean of the posterior #' predictive distribution. Can be performed for the data used to fit the model #' (posterior predictive checks) or for new data. By definition, these #' predictions have smaller variance than the posterior predictions performed by #' the \code{\link{posterior_predict.brmsfit}} method. This is because only the #' uncertainty in the mean is incorporated in the draws computed by #' \code{posterior_epred} while any residual error is ignored. However, the #' estimated means of both methods averaged across draws should be very #' similar. #' #' @aliases pp_expect #' #' @inheritParams posterior_predict.brmsfit #' @param dpar Optional name of a predicted distributional parameter. #' If specified, expected predictions of this parameters are returned. #' @param nlpar Optional name of a predicted non-linear parameter. #' If specified, expected predictions of this parameters are returned. #' #' @return An \code{array} of predicted \emph{mean} response values. For #' categorical and ordinal models, the output is an S x N x C array. 
#' Otherwise, the output is an S x N matrix, where S is the number of #' posterior draws, N is the number of observations, and C is the number of #' categories. In multivariate models, an additional dimension is added to the #' output which indexes along the different response variables. #' #' @template details-newdata-na #' @template details-allow_new_levels #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## compute expected predictions #' ppe <- posterior_epred(fit) #' str(ppe) #' } #' #' @aliases posterior_epred #' @method posterior_epred brmsfit #' @importFrom rstantools posterior_epred #' @export posterior_epred #' @export posterior_epred.brmsfit <- function(object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ...) { cl <- match.call() if ("re.form" %in% names(cl)) { re_formula <- re.form } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = "response", summary = FALSE ) } #' @export posterior_epred.mvbrmsprep <- function(object, ...) { out <- lapply(object$resps, posterior_epred, ...) along <- ifelse(length(out) > 1L, 3, 2) do_call(abind, c(out, along = along)) } #' @export posterior_epred.brmsprep <- function(object, dpar, nlpar, sort, scale = "response", incl_thres = NULL, summary = FALSE, robust = FALSE, probs = c(0.025, 0.975), ...) { summary <- as_one_logical(summary) dpars <- names(object$dpars) nlpars <- names(object$nlpars) if (length(dpar)) { # predict a distributional parameter dpar <- as_one_character(dpar) if (!dpar %in% dpars) { stop2("Invalid argument 'dpar'. Valid distributional ", "parameters are: ", collapse_comma(dpars)) } if (length(nlpar)) { stop2("Cannot use 'dpar' and 'nlpar' at the same time.") } predicted <- is.bprepl(object$dpars[[dpar]]) || is.bprepnl(object$dpars[[dpar]]) if (predicted) { # parameter varies across observations if (scale == "linear") { object$dpars[[dpar]]$family$link <- "identity" } if (is_ordinal(object$family)) { object$dpars[[dpar]]$cs <- NULL object$family <- object$dpars[[dpar]]$family <- .dpar_family(link = object$dpars[[dpar]]$family$link) } if (dpar_class(dpar) == "theta" && scale == "response") { ap_id <- as.numeric(dpar_id(dpar)) out <- get_theta(object)[, , ap_id, drop = FALSE] dim(out) <- dim(out)[c(1, 2)] } else { out <- get_dpar(object, dpar = dpar, ilink = TRUE) } } else { # parameter is constant across observations out <- object$dpars[[dpar]] out <- matrix(out, nrow = object$ndraws, ncol = object$nobs) } } else if (length(nlpar)) { # predict a non-linear parameter nlpar <- as_one_character(nlpar) if (!nlpar %in% nlpars) { stop2("Invalid argument 'nlpar'. Valid non-linear ", "parameters are: ", collapse_comma(nlpars)) } out <- get_nlpar(object, nlpar = nlpar) } else { # no dpar or nlpar specified incl_thres <- as_one_logical(incl_thres %||% FALSE) incl_thres <- incl_thres && is_ordinal(object$family) && scale == "linear" if (incl_thres) { # extract linear predictor array with thresholds etc. 
included if (is.mixfamily(object$family)) { stop2("'incl_thres' is not supported for mixture models.") } object$family$link <- "identity" } if (scale == "response" || incl_thres) { # predict the mean of the response distribution for (nlp in nlpars) { object$nlpars[[nlp]] <- get_nlpar(object, nlpar = nlp) } for (dp in dpars) { object$dpars[[dp]] <- get_dpar(object, dpar = dp) } if (is_trunc(object)) { out <- posterior_epred_trunc(object) } else { posterior_epred_fun <- paste0("posterior_epred_", object$family$family) posterior_epred_fun <- get(posterior_epred_fun, asNamespace("brms")) out <- posterior_epred_fun(object) } } else { # return results on the linear scale # extract all 'mu' parameters if (conv_cats_dpars(object$family)) { out <- dpars[grepl("^mu", dpars)] } else { out <- dpars[dpar_class(dpars) %in% "mu"] } if (length(out) == 1L) { out <- get_dpar(object, dpar = out, ilink = FALSE) } else { # multiple mu parameters in categorical or mixture models out <- lapply(out, get_dpar, prep = object, ilink = FALSE) out <- abind::abind(out, along = 3) } } } if (is.null(dim(out))) { out <- as.matrix(out) } colnames(out) <- NULL out <- reorder_obs(out, object$old_order, sort = sort) if (scale == "response" && is_polytomous(object$family) && length(dim(out)) == 3L && dim(out)[3] == length(object$cats)) { # for ordinal models with varying thresholds, dim[3] may not match cats dimnames(out)[[3]] <- object$cats } if (summary) { # only for compatibility with the 'fitted' method out <- posterior_summary(out, probs = probs, robust = robust) if (is_polytomous(object$family) && length(dim(out)) == 3L) { if (scale == "linear") { dimnames(out)[[3]] <- paste0("eta", seq_dim(out, 3)) } else { dimnames(out)[[3]] <- paste0("P(Y = ", dimnames(out)[[3]], ")") } } } out } #' Expected Values of the Posterior Predictive Distribution #' #' This method is an alias of \code{\link{posterior_epred.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. #' #' @inheritParams posterior_epred.brmsfit #' @param object An object of class \code{brmsfit}. #' @param scale Either \code{"response"} or \code{"linear"}. #' If \code{"response"}, results are returned on the scale #' of the response variable. If \code{"linear"}, #' results are returned on the scale of the linear predictor term, #' that is without applying the inverse link function or #' other transformations. #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}.. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predicted \emph{mean} response values. #' If \code{summary = FALSE} the output resembles those of #' \code{\link{posterior_epred.brmsfit}}. #' #' If \code{summary = TRUE} the output depends on the family: For categorical #' and ordinal families, the output is an N x E x C array, where N is the #' number of observations, E is the number of summary statistics, and C is the #' number of categories. For all other families, the output is an N x E #' matrix. 
The number of summary statistics E is equal to \code{2 + #' length(probs)}: The \code{Estimate} column contains point estimates (either #' mean or median depending on argument \code{robust}), while the #' \code{Est.Error} column contains uncertainty estimates (either standard #' deviation or median absolute deviation depending on argument #' \code{robust}). The remaining columns starting with \code{Q} contain #' quantile estimates as specified via argument \code{probs}. #' #' In multivariate models, an additional dimension is added to the output #' which indexes along the different response variables. #' #' @seealso \code{\link{posterior_epred.brmsfit}} #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## compute expected predictions #' fitted_values <- fitted(fit) #' head(fitted_values) #' #' ## plot expected predictions against actual response #' dat <- as.data.frame(cbind(Y = standata(fit)$Y, fitted_values)) #' ggplot(dat) + geom_point(aes(x = Estimate, y = Y)) #' } #' #' @export fitted.brmsfit <- function(object, newdata = NULL, re_formula = NULL, scale = c("response", "linear"), resp = NULL, dpar = NULL, nlpar = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { scale <- match.arg(scale) summary <- as_one_logical(summary) contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = scale, summary = summary, robust = robust, probs = probs ) } #' Posterior Draws of the Linear Predictor #' #' Compute posterior draws of the linear predictor, that is draws before #' applying any link functions or other transformations. Can be performed for #' the data used to fit the model (posterior predictive checks) or for new data. #' #' @inheritParams posterior_epred.brmsfit #' @param object An object of class \code{brmsfit}. #' @param transform Logical; if \code{FALSE} #' (the default), draws of the linear predictor are returned. #' If \code{TRUE}, draws of transformed linear predictor, #' that is, after applying the link function are returned. #' @param dpar Name of a predicted distributional parameter #' for which draws are to be returned. By default, draws #' of the main distributional parameter(s) \code{"mu"} are returned. #' @param incl_thres Logical; only relevant for ordinal models when #' \code{transform} is \code{FALSE}, and ignored otherwise. Shall the #' thresholds and category-specific effects be included in the linear #' predictor? For backwards compatibility, the default is to not include them. #' #' @seealso \code{\link{posterior_epred.brmsfit}} #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' #' ## extract linear predictor values #' pl <- posterior_linpred(fit) #' str(pl) #' } #' #' @aliases posterior_linpred #' @method posterior_linpred brmsfit #' @importFrom rstantools posterior_linpred #' @export #' @export posterior_linpred posterior_linpred.brmsfit <- function( object, transform = FALSE, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, dpar = NULL, nlpar = NULL, incl_thres = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... 
) { cl <- match.call() if ("re.form" %in% names(cl)) { re_formula <- re.form } scale <- "linear" transform <- as_one_logical(transform) if (transform) { scale <- "response" # if transform, return inv-link draws of only a single # distributional or non-linear parameter for consistency # of brms and rstanarm if (is.null(dpar) && is.null(nlpar)) { dpar <- "mu" } } contains_draws(object) object <- restructure(object) prep <- prepare_predictions( object, newdata = newdata, re_formula = re_formula, resp = resp, ndraws = ndraws, draw_ids = draw_ids, check_response = FALSE, ... ) posterior_epred( prep, dpar = dpar, nlpar = nlpar, sort = sort, scale = scale, incl_thres = incl_thres, summary = FALSE ) } # ------------------- family specific posterior_epred methods --------------------- # All posterior_epred_ functions have the same arguments structure # @param prep A named list returned by prepare_predictions containing # all required data and draws # @return transformed linear predictor representing the mean # of the posterior predictive distribution posterior_epred_gaussian <- function(prep) { if (!is.null(prep$ac$lagsar)) { prep$dpars$mu <- posterior_epred_lagsar(prep) } prep$dpars$mu } posterior_epred_student <- function(prep) { if (!is.null(prep$ac$lagsar)) { prep$dpars$mu <- posterior_epred_lagsar(prep) } prep$dpars$mu } posterior_epred_skew_normal <- function(prep) { prep$dpars$mu } posterior_epred_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2)) } posterior_epred_shifted_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2) + ndt) } posterior_epred_binomial <- function(prep) { trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials } posterior_epred_bernoulli <- function(prep) { prep$dpars$mu } posterior_epred_poisson <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_negbinomial <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_negbinomial2 <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_geometric <- function(prep) { multiply_dpar_rate_denom(prep$dpars$mu, prep) } posterior_epred_discrete_weibull <- function(prep) { mean_discrete_weibull(prep$dpars$mu, prep$dpars$shape) } posterior_epred_com_poisson <- function(prep) { mean_com_poisson(prep$dpars$mu, prep$dpars$shape) } posterior_epred_exponential <- function(prep) { prep$dpars$mu } posterior_epred_gamma <- function(prep) { prep$dpars$mu } posterior_epred_weibull <- function(prep) { prep$dpars$mu } posterior_epred_frechet <- function(prep) { prep$dpars$mu } posterior_epred_gen_extreme_value <- function(prep) { with(prep$dpars, mu + sigma * (gamma(1 - xi) - 1) / xi) } posterior_epred_inverse.gaussian <- function(prep) { prep$dpars$mu } posterior_epred_exgaussian <- function(prep) { prep$dpars$mu } posterior_epred_wiener <- function(prep) { # mu is the drift rate with(prep$dpars, ndt - bias / mu + bs / mu * (exp(-2 * mu * bias) - 1) / (exp(-2 * mu * bs) - 1) ) } posterior_epred_beta <- function(prep) { prep$dpars$mu } posterior_epred_von_mises <- function(prep) { prep$dpars$mu } posterior_epred_asym_laplace <- function(prep) { with(prep$dpars, mu + sigma * (1 - 2 * quantile) / (quantile * (1 - quantile)) ) } posterior_epred_zero_inflated_asym_laplace <- function(prep) { posterior_epred_asym_laplace(prep) * (1 - prep$dpars$zi) } posterior_epred_cox <- function(prep) { stop2("Cannot compute expected values of the posterior predictive ", "distribution for family 'cox'.") } 
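# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of brms. The family
# specific posterior_epred_* helpers above return the closed-form mean of the
# response distribution for each posterior draw. The snippet below checks one
# such formula -- E[LogNormal(mu, sigma)] = exp(mu + sigma^2 / 2), as used by
# posterior_epred_lognormal() -- against a Monte Carlo estimate using base R
# only. It is wrapped in `if (FALSE)` so it never runs when the package is
# loaded; execute the body interactively to reproduce the check.
if (FALSE) {
  set.seed(1)
  mu <- 0.5
  sigma <- 0.8
  # closed-form mean, as computed per draw by posterior_epred_lognormal()
  analytic_mean <- exp(mu + sigma^2 / 2)
  # Monte Carlo approximation of the same quantity
  mc_mean <- mean(rlnorm(1e6, meanlog = mu, sdlog = sigma))
  # the two values should agree up to simulation error
  all.equal(analytic_mean, mc_mean, tolerance = 0.01)
}
# ---------------------------------------------------------------------------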
posterior_epred_hurdle_poisson <- function(prep) { with(prep$dpars, mu / (1 - exp(-mu)) * (1 - hu)) } posterior_epred_hurdle_negbinomial <- function(prep) { with(prep$dpars, mu / (1 - (shape / (mu + shape))^shape) * (1 - hu)) } posterior_epred_hurdle_gamma <- function(prep) { with(prep$dpars, mu * (1 - hu)) } posterior_epred_hurdle_lognormal <- function(prep) { with(prep$dpars, exp(mu + sigma^2 / 2) * (1 - hu)) } posterior_epred_zero_inflated_poisson <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_inflated_negbinomial <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_inflated_binomial <- function(prep) { trials <- data2draws(prep$data$trials, dim_mu(prep)) prep$dpars$mu * trials * (1 - prep$dpars$zi) } posterior_epred_zero_inflated_beta <- function(prep) { with(prep$dpars, mu * (1 - zi)) } posterior_epred_zero_one_inflated_beta <- function(prep) { with(prep$dpars, zoi * coi + mu * (1 - zoi)) } posterior_epred_categorical <- function(prep) { get_probs <- function(i) { eta <- insert_refcat(slice_col(eta, i), family = prep$family) dcategorical(cats, eta = eta) } eta <- abind(prep$dpars, along = 3) cats <- seq_len(prep$data$ncat) out <- abind(lapply(seq_cols(eta), get_probs), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_multinomial <- function(prep) { get_counts <- function(i) { eta <- insert_refcat(slice_col(eta, i), family = prep$family) dcategorical(cats, eta = eta) * trials[i] } eta <- abind(prep$dpars, along = 3) cats <- seq_len(prep$data$ncat) trials <- prep$data$trials out <- abind(lapply(seq_cols(eta), get_counts), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_dirichlet <- function(prep) { get_probs <- function(i) { eta <- insert_refcat(slice_col(eta, i), family = prep$family) dcategorical(cats, eta = eta) } eta <- prep$dpars[grepl("^mu", names(prep$dpars))] eta <- abind(eta, along = 3) cats <- seq_len(prep$data$ncat) out <- abind(lapply(seq_cols(eta), get_probs), along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- prep$cats out } posterior_epred_dirichlet2 <- function(prep) { mu <- prep$dpars[grepl("^mu", names(prep$dpars))] mu <- abind(mu, along = 3) sums_mu <- apply(mu, 1:2, sum) cats <- seq_len(prep$data$ncat) for (i in cats) { mu[, , i] <- mu[, , i] / sums_mu } dimnames(mu)[[3]] <- prep$cats mu } posterior_epred_cumulative <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_sratio <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_cratio <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_acat <- function(prep) { posterior_epred_ordinal(prep) } posterior_epred_custom <- function(prep) { custom_family_method(prep$family, "posterior_epred")(prep) } posterior_epred_mixture <- function(prep) { families <- family_names(prep$family) prep$dpars$theta <- get_theta(prep) out <- 0 for (j in seq_along(families)) { posterior_epred_fun <- paste0("posterior_epred_", families[j]) posterior_epred_fun <- get(posterior_epred_fun, asNamespace("brms")) tmp_prep <- pseudo_prep_for_mixture(prep, j) if (length(dim(prep$dpars$theta)) == 3L) { theta <- prep$dpars$theta[, , j] } else { theta <- prep$dpars$theta[, j] } out <- out + theta * posterior_epred_fun(tmp_prep) } out } # ------ posterior_epred helper functions ------ # compute 'posterior_epred' for ordinal models posterior_epred_ordinal <- function(prep) { dens <- get(paste0("d", prep$family$family), mode = "function") # 
the linear scale has one column less than the response scale adjust <- ifelse(prep$family$link == "identity", 0, 1) ncat_max <- max(prep$data$nthres) + adjust nact_min <- min(prep$data$nthres) + adjust init_mat <- matrix(ifelse(prep$family$link == "identity", NA, 0), nrow = prep$ndraws, ncol = ncat_max - nact_min) args <- list(link = prep$family$link) out <- vector("list", prep$nobs) for (i in seq_along(out)) { args_i <- args args_i$eta <- slice_col(prep$dpars$mu, i) args_i$disc <- slice_col(prep$dpars$disc, i) args_i$thres <- subset_thres(prep, i) ncat_i <- NCOL(args_i$thres) + adjust args_i$x <- seq_len(ncat_i) out[[i]] <- do_call(dens, args_i) if (ncat_i < ncat_max) { sel <- seq_len(ncat_max - ncat_i) out[[i]] <- cbind(out[[i]], init_mat[, sel]) } } out <- abind(out, along = 3) out <- aperm(out, perm = c(1, 3, 2)) dimnames(out)[[3]] <- seq_len(ncat_max) out } # compute 'posterior_epred' for lagsar models posterior_epred_lagsar <- function(prep) { stopifnot(!is.null(prep$ac$lagsar)) I <- diag(prep$nobs) .posterior_epred <- function(s) { IB <- I - with(prep$ac, lagsar[s, ] * Msar) as.numeric(solve(IB, prep$dpars$mu[s, ])) } out <- rblapply(seq_len(prep$ndraws), .posterior_epred) rownames(out) <- NULL out } # expand data to dimension appropriate for # vectorized multiplication with posterior draws data2draws <- function(x, dim) { stopifnot(length(dim) == 2L, length(x) %in% c(1, dim[2])) matrix(x, nrow = dim[1], ncol = dim[2], byrow = TRUE) } # expected dimension of the main parameter 'mu' dim_mu <- function(prep) { c(prep$ndraws, prep$nobs) } # is the model truncated? is_trunc <- function(prep) { stopifnot(is.brmsprep(prep)) any(prep$data[["lb"]] > -Inf) || any(prep$data[["ub"]] < Inf) } # prepares data required for truncation and calls the # family specific truncation function for posterior_epred values posterior_epred_trunc <- function(prep) { stopifnot(is_trunc(prep)) lb <- data2draws(prep$data[["lb"]], dim_mu(prep)) ub <- data2draws(prep$data[["ub"]], dim_mu(prep)) posterior_epred_trunc_fun <- paste0("posterior_epred_trunc_", prep$family$family) posterior_epred_trunc_fun <- try( get(posterior_epred_trunc_fun, asNamespace("brms")), silent = TRUE ) if (is(posterior_epred_trunc_fun, "try-error")) { stop2("posterior_epred values on the response scale not yet implemented ", "for truncated '", prep$family$family, "' models.") } trunc_args <- nlist(prep, lb, ub) do_call(posterior_epred_trunc_fun, trunc_args) } # ----- family specific truncation functions ----- # @param prep output of 'prepare_predictions' # @param lb lower truncation bound # @param ub upper truncation bound # @return draws of the truncated mean parameter posterior_epred_trunc_gaussian <- function(prep, lb, ub) { zlb <- (lb - prep$dpars$mu) / prep$dpars$sigma zub <- (ub - prep$dpars$mu) / prep$dpars$sigma # truncated mean of standard normal; see Wikipedia trunc_zmean <- (dnorm(zlb) - dnorm(zub)) / (pnorm(zub) - pnorm(zlb)) prep$dpars$mu + trunc_zmean * prep$dpars$sigma } posterior_epred_trunc_student <- function(prep, lb, ub) { zlb <- with(prep$dpars, (lb - mu) / sigma) zub <- with(prep$dpars, (ub - mu) / sigma) nu <- prep$dpars$nu # see Kim 2008: Moments of truncated Student-t distribution G1 <- gamma((nu - 1) / 2) * nu^(nu / 2) / (2 * (pt(zub, df = nu) - pt(zlb, df = nu)) * gamma(nu / 2) * gamma(0.5)) A <- (nu + zlb^2) ^ (-(nu - 1) / 2) B <- (nu + zub^2) ^ (-(nu - 1) / 2) trunc_zmean <- G1 * (A - B) prep$dpars$mu + trunc_zmean * prep$dpars$sigma } posterior_epred_trunc_lognormal <- function(prep, lb, ub) { lb <- ifelse(lb <
0, 0, lb) m1 <- with(prep$dpars, exp(mu + sigma^2 / 2) * (pnorm((log(ub) - mu) / sigma - sigma) - pnorm((log(lb) - mu) / sigma - sigma)) ) with(prep$dpars, m1 / (plnorm(ub, meanlog = mu, sdlog = sigma) - plnorm(lb, meanlog = mu, sdlog = sigma)) ) } posterior_epred_trunc_gamma <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) prep$dpars$scale <- prep$dpars$mu / prep$dpars$shape # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, scale / gamma(shape) * (incgamma(1 + shape, ub / scale) - incgamma(1 + shape, lb / scale)) ) with(prep$dpars, m1 / (pgamma(ub, shape, scale = scale) - pgamma(lb, shape, scale = scale)) ) } posterior_epred_trunc_exponential <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) inv_mu <- 1 / prep$dpars$mu # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, mu * (incgamma(2, ub / mu) - incgamma(2, lb / mu))) m1 / (pexp(ub, rate = inv_mu) - pexp(lb, rate = inv_mu)) } posterior_epred_trunc_weibull <- function(prep, lb, ub) { lb <- ifelse(lb < 0, 0, lb) prep$dpars$a <- 1 + 1 / prep$dpars$shape prep$dpars$scale <- with(prep$dpars, mu / gamma(a)) # see Jawitz 2004: Moments of truncated continuous univariate distributions m1 <- with(prep$dpars, scale * (incgamma(a, (ub / scale)^shape) - incgamma(a, (lb / scale)^shape)) ) with(prep$dpars, m1 / (pweibull(ub, shape, scale = scale) - pweibull(lb, shape, scale = scale)) ) } posterior_epred_trunc_binomial <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) max_value <- max(prep$data$trials) ub <- ifelse(ub > max_value, max_value, ub) trials <- prep$data$trials if (length(trials) > 1) { trials <- data2draws(trials, dim_mu(prep)) } args <- list(size = trials, prob = prep$dpars$mu) posterior_epred_trunc_discrete(dist = "binom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_poisson <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) args <- list(lambda = mu) posterior_epred_trunc_discrete(dist = "pois", args = args, lb = lb, ub = ub) } posterior_epred_trunc_negbinomial <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(prep$dpars$shape, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_negbinomial2 <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(1 / prep$dpars$sigma, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } posterior_epred_trunc_geometric <- function(prep, lb, ub) { lb <- ifelse(lb < -1, -1, lb) mu <- multiply_dpar_rate_denom(prep$dpars$mu, prep) max_value <- 3 * max(mu) ub <- ifelse(ub > max_value, max_value, ub) shape <- multiply_dpar_rate_denom(1, prep) args <- list(mu = mu, size = shape) posterior_epred_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub) } # posterior_epred values for truncated discrete distributions posterior_epred_trunc_discrete <- function(dist, args, lb, ub) { stopifnot(is.matrix(lb), is.matrix(ub)) message( "Computing posterior_epred values for truncated ", 
"discrete models may take a while." ) pdf <- get(paste0("d", dist), mode = "function") cdf <- get(paste0("p", dist), mode = "function") mean_kernel <- function(x, args) { # just x * density(x) x * do_call(pdf, c(x, args)) } if (any(is.infinite(c(lb, ub)))) { stop("lb and ub must be finite") } # simplify lb and ub back to vector format vec_lb <- lb[1, ] vec_ub <- ub[1, ] min_lb <- min(vec_lb) # array of dimension S x N x length((lb+1):ub) mk <- lapply((min_lb + 1):max(vec_ub), mean_kernel, args = args) mk <- do_call(abind, c(mk, along = 3)) m1 <- vector("list", ncol(mk)) for (n in seq_along(m1)) { # summarize only over non-truncated values for this observation J <- (vec_lb[n] - min_lb + 1):(vec_ub[n] - min_lb) m1[[n]] <- rowSums(mk[, n, ][, J, drop = FALSE]) } rm(mk) m1 <- do.call(cbind, m1) m1 / (do.call(cdf, c(list(ub), args)) - do.call(cdf, c(list(lb), args))) } #' @export pp_expect <- function(object, ...) { warning2("Method 'pp_expect' is deprecated. ", "Please use 'posterior_epred' instead.") UseMethod("posterior_epred") } brms/R/priors.R0000644000175000017500000023242414136720631013223 0ustar nileshnilesh#' Prior Definitions for \pkg{brms} Models #' #' Define priors for specific parameters or classes of parameters. #' #' @aliases brmsprior brmsprior-class #' #' @param prior A character string defining a distribution in \pkg{Stan} language #' @param class The parameter class. Defaults to \code{"b"} #' (i.e. population-level effects). #' See 'Details' for other valid parameter classes. #' @param coef Name of the coefficient within the parameter class. #' @param group Grouping factor for group-level parameters. #' @param resp Name of the response variable. #' Only used in multivariate models. #' @param dpar Name of a distributional parameter. #' Only used in distributional models. #' @param nlpar Name of a non-linear parameter. #' Only used in non-linear models. #' @param lb Lower bound for parameter restriction. Currently only allowed #' for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction. #' @param ub Upper bound for parameter restriction. Currently only allowed #' for classes \code{"b"}. Defaults to \code{NULL}, that is no restriction. #' @param check Logical; Indicates whether priors #' should be checked for validity (as far as possible). #' Defaults to \code{TRUE}. If \code{FALSE}, \code{prior} is passed #' to the Stan code as is, and all other arguments are ignored. #' @param ... Arguments passed to \code{set_prior}. #' #' @return An object of class \code{brmsprior} to be used in the \code{prior} #' argument of \code{\link{brm}}. #' #' @details #' \code{set_prior} is used to define prior distributions for parameters #' in \pkg{brms} models. The functions \code{prior}, \code{prior_}, and #' \code{prior_string} are aliases of \code{set_prior} each allowing #' for a different kind of argument specification. #' \code{prior} allows specifying arguments as expression without #' quotation marks using non-standard evaluation. #' \code{prior_} allows specifying arguments as one-sided formulas #' or wrapped in \code{quote}. #' \code{prior_string} allows specifying arguments as strings just #' as \code{set_prior} itself. #' #' Below, we explain its usage and list some common #' prior distributions for parameters. #' A complete overview on possible prior distributions is given #' in the Stan Reference Manual available at \url{https://mc-stan.org/}. #' #' To combine multiple priors, use \code{c(...)} or the \code{+} operator #' (see 'Examples'). 
#' \pkg{brms} does not check if the priors are written
#' in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their
#' syntactical correctness when the model is parsed to \code{C++} and
#' returns an error if they are not.
#' This, however, does not imply that priors are always meaningful if they are
#' accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems
#' (e.g., setting bounded priors on unbounded parameters), there is no guarantee
#' that the defined priors are reasonable for the model.
#' Below, we list the types of parameters in \pkg{brms} models
#' for which the user can specify prior distributions.
#' #' 1. Population-level ('fixed') effects
#' #' Every population-level effect has its own regression parameter.
#' These parameters are internally named as \code{b_<name>}, where \code{<name>}
#' represents the name of the corresponding population-level effect.
#' Suppose, for instance, that \code{y} is predicted by \code{x1} and \code{x2}
#' (i.e., \code{y ~ x1 + x2} in formula syntax).
#' Then, \code{x1} and \code{x2} have regression parameters
#' \code{b_x1} and \code{b_x2} respectively.
#' The default prior for population-level effects (including monotonic and
#' category specific effects) is an improper flat prior over the reals.
#' Other common options are normal priors or student-t priors.
#' If we want to have a normal prior with mean 0 and
#' standard deviation 5 for \code{x1}, and a unit student-t prior with 10
#' degrees of freedom for \code{x2}, we can specify this via
#' \code{set_prior("normal(0,5)", class = "b", coef = "x1")} and \cr
#' \code{set_prior("student_t(10, 0, 1)", class = "b", coef = "x2")}.
#' To put the same prior on all population-level effects at once,
#' we may write as a shortcut \code{set_prior("<prior>", class = "b")}.
#' This also leads to faster sampling, because priors can be vectorized in this case.
#' Both ways of defining priors can be combined using for instance
#' \code{set_prior("normal(0, 2)", class = "b")} and \cr
#' \code{set_prior("normal(0, 10)", class = "b", coef = "x1")}
#' at the same time. This will set a \code{normal(0, 10)} prior on
#' the effect of \code{x1} and a \code{normal(0, 2)} prior
#' on all other population-level effects.
#' However, this will break vectorization and
#' may slow down the sampling procedure a bit.
#' #' In case of the default intercept parameterization
#' (discussed in the 'Details' section of \code{\link{brmsformula}}),
#' general priors on class \code{"b"} will \emph{not} affect
#' the intercept. Instead, the intercept has its own parameter class
#' named \code{"Intercept"} and priors can thus be
#' specified via \code{set_prior("<prior>", class = "Intercept")}.
#' Setting a prior on the intercept will not break vectorization
#' of the other population-level effects.
#' Note that technically, this prior is set on an intercept that
#' results when internally centering all population-level predictors
#' around zero to improve sampling efficiency. On this centered
#' intercept, specifying a prior is actually much easier and more
#' intuitive than on the original intercept, since the former
#' represents the expected response value when all predictors
#' are at their means. To treat the intercept as an ordinary
#' population-level effect and avoid the centering parameterization,
#' use \code{0 + Intercept} on the right-hand side of the model formula.
#' #' A special shrinkage prior to be applied on population-level effects is the
#' (regularized) horseshoe prior and related priors.
See
#' \code{\link{horseshoe}} for details. Another shrinkage prior is the
#' so-called lasso prior. See \code{\link{lasso}} for details.
#' #' In non-linear models, population-level effects are defined separately
#' for each non-linear parameter. Accordingly, it is necessary to specify
#' the non-linear parameter in \code{set_prior} so that priors
#' can be assigned correctly.
#' If, for instance, \code{alpha} is the parameter and \code{x} the predictor
#' for which we want to define the prior, we can write
#' \code{set_prior("<prior>", coef = "x", nlpar = "alpha")}.
#' As a shortcut we can use \code{set_prior("<prior>", nlpar = "alpha")}
#' to set the same prior on all population-level effects of \code{alpha} at once.
#' #' If desired, population-level effects can be restricted to fall only
#' within a certain interval using the \code{lb} and \code{ub} arguments
#' of \code{set_prior}. This is often required when defining priors
#' that are not defined everywhere on the real line, such as uniform
#' or gamma priors. When defining a \code{uniform(2,4)} prior,
#' you should write \code{set_prior("uniform(2,4)", lb = 2, ub = 4)}.
#' When using a prior that is defined on the positive reals only
#' (such as a gamma prior) set \code{lb = 0}.
#' In most situations, it is not useful to restrict population-level
#' parameters through bounded priors
#' (non-linear models are an important exception),
#' but if you really want to, this is the way to go.
#' #' 2. Standard deviations of group-level ('random') effects
#' #' Each group-level effect of each grouping factor has a standard deviation named
#' \code{sd_<group>_<coef>}. Consider, for instance, the formula
#' \code{y ~ x1 + x2 + (1 + x1 | g)}.
#' We see that the intercept as well as \code{x1} are group-level effects
#' nested in the grouping factor \code{g}.
#' The corresponding standard deviation parameters are named as
#' \code{sd_g_Intercept} and \code{sd_g_x1} respectively.
#' These parameters are restricted to be non-negative and, by default,
#' have a half student-t prior with 3 degrees of freedom and a
#' scale parameter that depends on the standard deviation of the response
#' after applying the link function. Minimally, the scale parameter is 2.5.
#' This prior is used (a) to be only weakly informative in order to influence
#' results as little as possible, while (b) providing at least some regularization
#' to considerably improve convergence and sampling efficiency.
#' To define a prior distribution only for standard deviations
#' of a specific grouping factor,
#' use \cr \code{set_prior("<prior>", class = "sd", group = "<group>")}.
#' To define a prior distribution only for a specific standard deviation
#' of a specific grouping factor, you may write \cr
#' \code{set_prior("<prior>", class = "sd", group = "<group>", coef = "<coef>")}.
#' Recommendations on useful prior distributions for
#' standard deviations are given in Gelman (2006), but note that he
#' no longer recommends uniform priors. \cr
#' #' When defining priors on group-level parameters in non-linear models,
#' please make sure to specify the corresponding non-linear parameter
#' through the \code{nlpar} argument in the same way as
#' for population-level effects.
#' #' 3. Correlations of group-level ('random') effects
#' #' If there is more than one group-level effect per grouping factor,
#' the correlations between those effects have to be estimated.
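#' For the formula \code{y ~ x1 + x2 + (1 + x1 | g)} used above, for instance,
#' this concerns the correlation between the varying intercept and the varying
#' slope of \code{x1} within group \code{g}; a prior for it is specified on the
#' correlation matrix of \code{g} as a whole, for example via
#' \code{set_prior("lkj(2)", class = "cor", group = "g")}.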
#' The prior \code{lkj_corr_cholesky(eta)} or in short
#' \code{lkj(eta)} with \code{eta > 0}
#' is essentially the only prior for (Cholesky factors of) correlation matrices.
#' If \code{eta = 1} (the default) all correlation matrices
#' are equally likely a priori. If \code{eta > 1}, extreme correlations
#' become less likely, whereas \code{0 < eta < 1} results in
#' higher probabilities for extreme correlations.
#' Correlation matrix parameters in \code{brms} models are named as
#' \code{cor_<group>} (e.g., \code{cor_g} if \code{g} is the grouping factor).
#' To set the same prior on every correlation matrix,
#' use for instance \code{set_prior("lkj(2)", class = "cor")}.
#' Internally, the priors are transformed to be put on the Cholesky factors
#' of the correlation matrices to improve efficiency and numerical stability.
#' The corresponding parameter class of the Cholesky factors is \code{L},
#' but it is not recommended to specify priors for this parameter class directly.
#' #' 4. Splines
#' #' Splines are implemented in \pkg{brms} using the 'random effects'
#' formulation as explained in \code{\link[mgcv:gamm]{gamm}}.
#' Thus, each spline has its corresponding standard deviations
#' modeling the variability within this term. In \pkg{brms}, this
#' parameter class is called \code{sds} and priors can
#' be specified via \code{set_prior("<prior>", class = "sds",
#' coef = "<term label>")}. The default prior is the same as
#' for standard deviations of group-level effects.
#' #' 5. Gaussian processes
#' #' Gaussian processes as currently implemented in \pkg{brms} have
#' two parameters, the standard deviation parameter \code{sdgp},
#' and the characteristic length-scale parameter \code{lscale}
#' (see \code{\link{gp}} for more details). The default prior
#' of \code{sdgp} is the same as for standard deviations of
#' group-level effects. The default prior of \code{lscale}
#' is an informative inverse-gamma prior specifically tuned
#' to the covariates of the Gaussian process (for more details see
#' \url{https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html}).
#' This tuned prior may be overly informative in some cases, so please
#' consider other priors as well to make sure inference is
#' robust to the prior specification. If tuning fails, a half-normal prior
#' is used instead.
#' #' 6. Autocorrelation parameters
#' #' The autocorrelation parameters currently implemented are named
#' \code{ar} (autoregression), \code{ma} (moving average),
#' \code{arr} (autoregression of the response), \code{car}
#' (spatial conditional autoregression), as well as \code{lagsar}
#' and \code{errorsar} (spatial simultaneous autoregression).
#' #' Priors can be defined by \code{set_prior("<prior>", class = "ar")}
#' for \code{ar} and similar for other autocorrelation parameters.
#' By default, \code{ar} and \code{ma} are bounded between \code{-1}
#' and \code{1}; \code{car}, \code{lagsar}, and \code{errorsar} are
#' bounded between \code{0} and \code{1}; and \code{arr} is unbounded
#' (you may change this by using the arguments \code{lb} and \code{ub}).
#' The default prior is flat over the definition area.
#' #' 7. Distance parameters of monotonic effects
#' #' As explained in the details section of \code{\link{brm}},
#' monotonic effects make use of a special parameter vector to
#' estimate the 'normalized distances' between consecutive predictor
#' categories. This is realized in \pkg{Stan} using the \code{simplex}
#' parameter type. This class is named \code{"simo"} (short for
#' simplex monotonic) in \pkg{brms}.
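#' For a single monotonic predictor \code{x}, modeled via \code{mo(x)} in the
#' formula, the corresponding simplex parameter would, for example, be named
#' \code{simo_mox1}.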
#' The only valid prior for simplex parameters is the
#' dirichlet prior, which accepts a vector of length \code{K - 1}
#' (\code{K} = number of predictor categories) as input defining the
#' 'concentration' of the distribution. Explaining the dirichlet prior
#' is beyond the scope of this documentation, but we want to describe
#' how to define this prior in a syntactically correct way.
#' If a predictor \code{x} with \code{K} categories is modeled as monotonic,
#' we can define a prior on its corresponding simplex via \cr
#' \code{prior(dirichlet(<vector>), class = simo, coef = mox1)}.
#' The \code{1} at the end of \code{coef} indicates that this is the first
#' simplex in this term. If interactions between multiple monotonic
#' variables are modeled, multiple simplexes per term are required.
#' For \code{<vector>}, we can put in any \code{R} expression
#' defining a vector of length \code{K - 1}. The default is a uniform
#' prior (i.e., \code{<vector> = rep(1, K-1)}) over all simplexes
#' of the respective dimension.
#' #' 8. Parameters for specific families
#' #' Some families need additional parameters to be estimated.
#' Families \code{gaussian}, \code{student}, \code{skew_normal},
#' \code{lognormal}, and \code{gen_extreme_value} need the parameter
#' \code{sigma} to account for the residual standard deviation.
#' By default, \code{sigma} has a half student-t prior that scales
#' in the same way as the group-level standard deviations.
#' Further, family \code{student} needs the parameter
#' \code{nu} representing the degrees of freedom of the Student-t distribution.
#' By default, \code{nu} has prior \code{gamma(2, 0.1)}
#' and a fixed lower bound of \code{1}.
#' Families \code{gamma}, \code{weibull}, \code{inverse.gaussian}, and
#' \code{negbinomial} need a \code{shape} parameter that has a
#' \code{gamma(0.01, 0.01)} prior by default.
#' For families \code{cumulative}, \code{cratio}, \code{sratio},
#' and \code{acat}, and only if \code{threshold = "equidistant"},
#' the parameter \code{delta} is used to model the distance between
#' two adjacent thresholds.
#' By default, \code{delta} has an improper flat prior over the reals.
#' The \code{von_mises} family needs the parameter \code{kappa}, representing
#' the concentration parameter. By default, \code{kappa} has prior
#' \code{gamma(2, 0.01)}. \cr
#' Every family specific parameter has its own prior class, so that
#' \code{set_prior("<prior>", class = "<parameter>")} is the right way to go.
#' All of these priors are chosen to be weakly informative,
#' having only minimal influence on the estimates,
#' while improving convergence and sampling efficiency.
#' #' Fixing parameters to constants is possible by using the \code{constant}
#' function, for example, \code{constant(1)} to fix a parameter to 1.
#' Broadcasting to vectors and matrices is done automatically.
#' #' Often, it may not be immediately clear which parameters are present in the
#' model. To get a full list of parameters and parameter classes for which
#' priors can be specified (depending on the model), use function
#' \code{\link{get_prior}}.
#' #' @seealso \code{\link{get_prior}}
#' #' @references
#' Gelman A. (2006). Prior distributions for variance parameters in hierarchical models.
#' Bayesian Analysis, 1(3), 515 -- 534.
#' #' @examples #' ## use alias functions #' (prior1 <- prior(cauchy(0, 1), class = sd)) #' (prior2 <- prior_(~cauchy(0, 1), class = ~sd)) #' (prior3 <- prior_string("cauchy(0, 1)", class = "sd")) #' identical(prior1, prior2) #' identical(prior1, prior3) #' #' # check which parameters can have priors #' get_prior(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative()) #' #' # define some priors #' bprior <- c(prior_string("normal(0,10)", class = "b"), #' prior(normal(1,2), class = b, coef = treat), #' prior_(~cauchy(0,2), class = ~sd, #' group = ~subject, coef = ~Intercept)) #' #' # verify that the priors indeed found their way into Stan's model code #' make_stancode(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = cumulative(), #' prior = bprior) #' #' # use the horseshoe prior to model sparsity in regression coefficients #' make_stancode(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson(), #' prior = set_prior("horseshoe(3)")) #' #' # fix certain priors to constants #' bprior <- prior(constant(1), class = "b") + #' prior(constant(2), class = "b", coef = "zBase") + #' prior(constant(0.5), class = "sd") #' make_stancode(count ~ zAge + zBase + (1 | patient), #' data = epilepsy, prior = bprior) #' #' # pass priors to Stan without checking #' prior <- prior_string("target += normal_lpdf(b[1] | 0, 1)", check = FALSE) #' make_stancode(count ~ Trt, data = epilepsy, prior = prior) #' #' @export set_prior <- function(prior, class = "b", coef = "", group = "", resp = "", dpar = "", nlpar = "", lb = NA, ub = NA, check = TRUE) { input <- nlist(prior, class, coef, group, resp, dpar, nlpar, lb, ub, check) input <- try(as.data.frame(input), silent = TRUE) if (is(input, "try-error")) { stop2("Processing arguments of 'set_prior' has failed:\n", input) } out <- vector("list", nrow(input)) for (i in seq_along(out)) { out[[i]] <- do_call(.set_prior, input[i, ]) } Reduce("+", out) } # validate arguments passed to 'set_prior' .set_prior <- function(prior, class, coef, group, resp, dpar, nlpar, lb, ub, check) { prior <- as_one_character(prior) class <- as_one_character(class) group <- as_one_character(group) coef <- as_one_character(coef) resp <- as_one_character(resp) dpar <- as_one_character(dpar) nlpar <- as_one_character(nlpar) lb <- as_one_character(lb, allow_na = TRUE) ub <- as_one_character(ub, allow_na = TRUE) check <- as_one_logical(check) # validate boundaries bound <- "" if (class %in% c("ar", "ma") && (!is.na(lb) || !is.na(ub))) { # changed in version 2.9.5 lb <- ub <- NA warning2( "Changing the boundaries of autocorrelation parameters ", "is deprecated and will be ignored." ) } if (!is.na(lb) || !is.na(ub)) { # TODO: extend the boundary interface to more parameter classes boundary_classes <- c("b") if (!class %in% boundary_classes) { stop2("Currently boundaries are only allowed for classe(s) ", collapse_comma(boundary_classes), "." 
) } if (nzchar(coef)) { stop2("Argument 'coef' may not be specified when using boundaries.") } # don't put spaces in boundary declarations lb <- if (!is.na(lb)) paste0("lower=", lb) ub <- if (!is.na(ub)) paste0("upper=", ub) if (!is.null(lb) || !is.null(ub)) { bound <- paste0("<", paste(c(lb, ub), collapse = ","), ">") } } if (!check) { # prior will be added to the log-posterior as is class <- coef <- group <- resp <- dpar <- nlpar <- bound <- "" } source <- "user" out <- nlist(prior, source, class, coef, group, resp, dpar, nlpar, bound) do_call(brmsprior, out) }
#' @describeIn set_prior Alias of \code{set_prior} allowing one to
#' specify arguments as expressions without quotation marks.
#' @export prior <- function(prior, ...) { call <- as.list(match.call()[-1]) seval <- rmNULL(call[prior_seval_args()]) call[prior_seval_args()] <- NULL call <- lapply(call, deparse_no_string) do_call(set_prior, c(call, seval)) }
#' @describeIn set_prior Alias of \code{set_prior} allowing one to specify
#' arguments as one-sided formulas or wrapped in \code{quote}.
#' @export prior_ <- function(prior, ...) { call <- nlist(prior, ...) seval <- rmNULL(call[prior_seval_args()]) call[prior_seval_args()] <- NULL as_string <- function(x) { if (is.formula(x) && length(x) == 2) { deparse_no_string(x[[2]]) } else if (is.call(x) || is.name(x) || is.atomic(x)) { deparse_no_string(x) } else { stop2("Arguments must be one-sided formula, call, name, or constant.") } } call <- lapply(call, as_string) do_call(set_prior, c(call, seval)) } # arguments for which to use standard evaluation prior_seval_args <- function() { c("check") }
#' @describeIn set_prior Alias of \code{set_prior} allowing one to
#' specify arguments as strings.
#' @export prior_string <- function(prior, ...) { set_prior(prior, ...) }
#' Overview on Priors for \pkg{brms} Models
#' #' Get information on all parameters (and parameter classes) for which priors
#' may be specified, including default priors.
#' #' @inheritParams brm
#' @param ... Other arguments for internal usage only.
#' #' @return A data.frame with columns \code{prior}, \code{class}, \code{coef},
#' and \code{group} and several rows, each providing information on a
#' parameter (or parameter class) on which priors can be specified. The prior
#' column is empty except for internal default priors.
#' #' @seealso \code{\link{set_prior}}
#' #' @examples
#' ## get all parameters and parameter classes to define priors on
#' (prior <- get_prior(count ~ zAge + zBase * Trt + (1|patient) + (1|obs),
#' data = epilepsy, family = poisson()))
#' #' ## define a prior on all population-level effects at once
#' prior$prior[1] <- "normal(0,10)"
#' #' ## define a specific prior on the population-level effect of Trt
#' prior$prior[5] <- "student_t(10, 0, 5)"
#' #' ## verify that the priors indeed found their way into Stan's model code
#' make_stancode(count ~ zAge + zBase * Trt + (1|patient) + (1|obs),
#' data = epilepsy, family = poisson(),
#' prior = prior)
#' #' @export get_prior <- function(formula, data, family = gaussian(), autocor = NULL, data2 = NULL, knots = NULL, sparse = NULL, ...) { if (is.brmsfit(formula)) { stop2("Use 'prior_summary' to extract priors from 'brmsfit' objects.") } formula <- validate_formula( formula, data = data, family = family, autocor = autocor, sparse = sparse ) bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula) ) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots ) .get_prior(bterms, data, ...)
} # internal work function of 'get_prior' # @param internal return priors for internal use? # @return a brmsprior object .get_prior <- function(bterms, data, internal = FALSE, ...) { ranef <- tidy_ranef(bterms, data) meef <- tidy_meef(bterms, data) # initialize output prior <- empty_prior() # priors for distributional parameters prior <- prior + prior_predictor( bterms, data = data, internal = internal ) # priors of group-level parameters def_scale_prior <- def_scale_prior(bterms, data) prior <- prior + prior_re( ranef, def_scale_prior = def_scale_prior, internal = internal ) # priors for noise-free variables prior <- prior + prior_Xme(meef, internal = internal) # explicitly label default priors as such prior$source <- "default" # apply 'unique' as the same prior may have been included multiple times to_order <- with(prior, order(resp, dpar, nlpar, class, group, coef)) prior <- unique(prior[to_order, , drop = FALSE]) rownames(prior) <- NULL class(prior) <- c("brmsprior", "data.frame") prior } # generate priors for predictor terms # @return a 'brmsprior' object prior_predictor <- function(x, ...) { UseMethod("prior_predictor") } #' @export prior_predictor.default <- function(x, ...) { empty_prior() } prior_predictor.mvbrmsterms <- function(x, internal = FALSE, ...) { prior <- empty_prior() for (i in seq_along(x$terms)) { prior <- prior + prior_predictor(x$terms[[i]], ...) } for (cl in c("b", "Intercept")) { # deprecated; see warning in 'validate_prior_special' if (any(with(prior, class == cl & coef == ""))) { prior <- prior + brmsprior(class = cl) } } if (x$rescor) { if (internal) { prior <- prior + brmsprior(class = "Lrescor", prior = "lkj_corr_cholesky(1)") } else { prior <- prior + brmsprior(class = "rescor", prior = "lkj(1)") } if (family_names(x)[1] %in% "student") { prior <- prior + brmsprior(class = "nu", prior = "gamma(2, 0.1)") } } prior } prior_predictor.brmsterms <- function(x, data, ...) 
{ data <- subset_data(data, x) def_scale_prior <- def_scale_prior(x, data) valid_dpars <- valid_dpars(x) prior <- empty_prior() # priors for mixture models if (is.mixfamily(x$family)) { if (has_joint_theta(x)) { # individual theta parameters should not have a prior in this case theta_dpars <- str_subset(valid_dpars, "^theta[[:digit:]]+") valid_dpars <- setdiff(valid_dpars, theta_dpars) prior <- prior + brmsprior(prior = "dirichlet(1)", class = "theta", resp = x$resp) } if (fix_intercepts(x)) { # fixing thresholds across mixture components # requires a single set of priors at the top level stopifnot(is_ordinal(x)) prior <- prior + prior_thres(x, def_scale_prior = def_scale_prior) } } # priors for distributional parameters for (dp in valid_dpars) { def_dprior <- def_dprior(x, dp, data = data) if (!is.null(x$dpars[[dp]])) { # parameter is predicted dp_prior <- prior_predictor( x$dpars[[dp]], data = data, def_scale_prior = def_scale_prior, def_dprior = def_dprior ) } else if (!is.null(x$fdpars[[dp]])) { # parameter is fixed dp_prior <- empty_prior() } else { # parameter is estimated dp_prior <- brmsprior(def_dprior, class = dp, resp = x$resp) } prior <- prior + dp_prior } # priors for non-linear parameters for (nlp in names(x$nlpars)) { nlp_prior <- prior_predictor( x$nlpars[[nlp]], data = data, def_scale_prior = def_scale_prior, def_dprior = def_dprior ) prior <- prior + nlp_prior } if (conv_cats_dpars(x$family)) { # deprecated; see warning in 'validate_prior_special' for (cl in c("b", "Intercept")) { if (any(find_rows(prior, class = cl, coef = "", resp = x$resp))) { prior <- prior + brmsprior(class = cl, resp = x$resp) } } } # priors for noise-free response variables sdy <- get_sdy(x, data) if (!is.null(sdy)) { prior <- prior + brmsprior(class = "meanme", resp = x$resp) + brmsprior(class = "sdme", resp = x$resp) } # priors for autocorrelation parameters # prior <- prior + prior_autocor(x, def_scale_prior = def_scale_prior) prior } # prior for linear predictor termss #' @export prior_predictor.btl <- function(x, ...) { prior_fe(x, ...) + prior_thres(x, ...) + prior_sp(x, ...) + prior_cs(x, ...) + prior_sm(x, ...) + prior_gp(x, ...) + prior_ac(x, ...) + prior_bhaz(x, ...) } # priors for non-linear predictor terms #' @export prior_predictor.btnl <- function(x, ...) { # thresholds are required even in non-linear ordinal models prior_thres(x, ...) + prior_ac(x, ...) } # priors for population-level parameters prior_fe <- function(bterms, data, def_dprior = "", ...) { prior <- empty_prior() fixef <- colnames(data_fe(bterms, data)$X) px <- check_prefix(bterms) center_X <- stan_center_X(bterms) if (center_X && !is_ordinal(bterms)) { # priors for ordinal thresholds are provided in 'prior_thres' prior <- prior + brmsprior(def_dprior, class = "Intercept", ls = px) fixef <- setdiff(fixef, "Intercept") } if (length(fixef)) { prior <- prior + brmsprior(class = "b", coef = c("", fixef), ls = px) } prior } # priors for thresholds of ordinal models prior_thres <- function(bterms, def_scale_prior = "", ...) 
{ prior <- empty_prior() if (!is_ordinal(bterms)) { # thresholds only exist in ordinal models return(prior) } if (fix_intercepts(bterms) && !is.mixfamily(bterms$family)) { # fixed thresholds cannot have separate priors return(prior) } # create priors for threshold per group .prior_thres <- function(thres, thres_prior = "", group = "") { prior <- empty_prior() if (has_equidistant_thres(bterms)) { # prior for the delta parameter for equidistant thresholds thres <- character(0) bound <- str_if(has_ordered_thres(bterms), "") prior <- prior + brmsprior( class = "delta", group = group, bound = bound, ls = px ) } prior <- prior + brmsprior( prior = c(thres_prior, rep("", length(thres))), class = "Intercept", coef = c("", thres), group = group, ls = px ) } px <- check_prefix(bterms) groups <- get_thres_groups(bterms) if (any(nzchar(groups))) { # for models with multiple threshold vectors prior <- prior + .prior_thres(character(0), def_scale_prior) for (g in groups) { prior <- prior + .prior_thres(get_thres(bterms, group = g), group = g) } } else { # for models with a single threshold vector prior <- prior + .prior_thres(get_thres(bterms), def_scale_prior) } prior } # priors for coefficients of baseline hazards in the Cox model prior_bhaz <- function(bterms, ...) { prior <- empty_prior() if (!is_cox(bterms$family)) { return(prior) } px <- check_prefix(bterms) # the scale of sbhaz is not identified when an intercept is part of mu # thus a sum-to-one constraint ensures identification prior <- prior + brmsprior("dirichlet(1)", class = "sbhaz", ls = px) prior } # priors for special effects parameters prior_sp <- function(bterms, data, ...) { prior <- empty_prior() spef <- tidy_spef(bterms, data) if (nrow(spef)) { px <- check_prefix(bterms) prior <- prior + brmsprior( class = "b", coef = c("", spef$coef), ls = px ) simo_coef <- get_simo_labels(spef, use_id = TRUE) if (length(simo_coef)) { prior <- prior + brmsprior( prior = "dirichlet(1)", class = "simo", coef = simo_coef, ls = px ) } } prior } # priors for category spcific effects parameters prior_cs <- function(bterms, data, ...) { prior <- empty_prior() csef <- colnames(get_model_matrix(bterms$cs, data = data)) if (length(csef)) { px <- check_prefix(bterms) prior <- prior + brmsprior(class = "b", coef = c("", csef), ls = px) } prior } # default priors for hyper-parameters of noise-free variables prior_Xme <- function(meef, internal = FALSE, ...) { stopifnot(is.meef_frame(meef)) prior <- empty_prior() if (nrow(meef)) { prior <- prior + brmsprior(class = "meanme", coef = c("", meef$coef)) + brmsprior(class = "sdme", coef = c("", meef$coef)) # priors for correlation parameters groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) if (meef$cor[K[1]] && length(K) > 1L) { if (internal) { prior <- prior + brmsprior("lkj_corr_cholesky(1)", class = "Lme") if (nzchar(g)) { prior <- prior + brmsprior(class = "Lme", group = g) } } else { prior <- prior + brmsprior("lkj(1)", class = "corme") if (nzchar(g)) { prior <- prior + brmsprior(class = "corme", group = g) } } } } } prior } # default priors of gaussian processes # @param def_scale_prior: a character string defining # the default prior SD parameters prior_gp <- function(bterms, data, def_scale_prior, ...) 
{ prior <- empty_prior() gpef <- tidy_gpef(bterms, data) if (nrow(gpef)) { px <- check_prefix(bterms) lscale_prior <- def_lscale_prior(bterms, data) prior <- prior + brmsprior(class = "sdgp", prior = def_scale_prior, ls = px) + brmsprior(class = "sdgp", coef = unlist(gpef$sfx1), ls = px) + brmsprior(class = "lscale", ls = px) + brmsprior(class = "lscale", prior = lscale_prior, coef = names(lscale_prior), ls = px) } prior } # default priors for length-scale parameters of GPs # see https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html # @param plb prior probability of being lower than minimum length-scale # @param pub prior probability of being higher than maximum length-scale def_lscale_prior <- function(bterms, data, plb = 0.01, pub = 0.01) { .opt_fun <- function(x, lb, ub) { # optimize parameters on the log-scale to make them positive only x <- exp(x) y1 <- pinvgamma(lb, x[1], x[2], log.p = TRUE) y2 <- pinvgamma(ub, x[1], x[2], lower.tail = FALSE, log.p = TRUE) c(y1 - log(plb), y2 - log(pub)) } .def_lscale_prior <- function(X) { dq <- diff_quad(X) ub <- sqrt(max(dq)) lb <- sqrt(min(dq[dq > 0])) # prevent extreme priors lb <- max(lb, 0.01 * ub) opt_res <- nleqslv::nleqslv( c(0, 0), .opt_fun, lb = lb, ub = ub, control = list(allowSingular = TRUE) ) prior <- "normal(0, 0.5)" if (opt_res$termcd %in% 1:2) { # use the inverse-gamma prior only in case of convergence pars <- exp(opt_res$x) prior <- paste0("inv_gamma(", sargs(round(pars, 6)), ")") } return(prior) } p <- usc(combine_prefix(bterms)) gpef <- tidy_gpef(bterms, data) data_gp <- data_gp(bterms, data, internal = TRUE) out <- vector("list", NROW(gpef)) for (i in seq_along(out)) { pi <- paste0(p, "_", i) iso <- gpef$iso[i] cons <- gpef$cons[[i]] if (length(cons) > 0L) { for (j in seq_along(cons)) { Xgp <- data_gp[[paste0("Xgp_prior", pi, "_", j)]] if (iso) { c(out[[i]]) <- .def_lscale_prior(Xgp) } else { c(out[[i]]) <- apply(Xgp, 2, .def_lscale_prior) } } } else { Xgp <- data_gp[[paste0("Xgp_prior", pi)]] if (iso) { out[[i]] <- .def_lscale_prior(Xgp) } else { out[[i]] <- apply(Xgp, 2, .def_lscale_prior) } } # transpose so that by-levels vary last names(out[[i]]) <- as.vector(t(gpef$sfx2[[i]])) } unlist(out) } # priors for varying effects parameters # @param ranef: a list returned by tidy_ranef # @param def_scale_prior a character string defining # the default prior for SD parameters # @param internal: see 'get_prior' prior_re <- function(ranef, def_scale_prior, internal = FALSE, ...) 
{ prior <- empty_prior() if (!nrow(ranef)) { return(prior) } # global sd class px <- check_prefix(ranef) upx <- unique(px) if (length(def_scale_prior) > 1L) { def_scale_prior <- def_scale_prior[px$resp] } global_sd_prior <- brmsprior( class = "sd", prior = def_scale_prior, ls = px ) prior <- prior + global_sd_prior for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) group <- r$group[1] rpx <- check_prefix(r) urpx <- unique(rpx) # include group-level standard deviations prior <- prior + brmsprior(class = "sd", group = group, ls = urpx) + brmsprior(class = "sd", coef = r$coef, group = group, ls = rpx) # detect duplicated group-level effects J <- with(prior, class == "sd" & nzchar(coef)) dupli <- duplicated(prior[J, ]) if (any(dupli)) { stop2("Duplicated group-level effects detected for group ", group) } # include correlation parameters if (isTRUE(r$cor[1]) && nrow(r) > 1L) { if (internal) { prior <- prior + brmsprior( class = "L", group = c("", group), prior = c("lkj_corr_cholesky(1)", "") ) } else { prior <- prior + brmsprior( class = "cor", group = c("", group), prior = c("lkj(1)", "") ) } } } tranef <- get_dist_groups(ranef, "student") if (isTRUE(nrow(tranef) > 0L)) { prior <- prior + brmsprior("gamma(2, 0.1)", class = "df", group = tranef$group) } prior } # priors for smooth terms prior_sm <- function(bterms, data, def_scale_prior, ...) { prior <- empty_prior() smef <- tidy_smef(bterms, data) if (NROW(smef)) { px <- check_prefix(bterms) # prior for the FE coefficients Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { prior <- prior + brmsprior( class = "b", coef = c("", Xs_names), ls = px ) } # prior for SD parameters of the RE coefficients smterms <- unique(smef$term) prior_strings <- c(def_scale_prior, rep("", length(smterms))) prior <- prior + brmsprior( class = "sds", coef = c("", smterms), prior = prior_strings, ls = px ) } prior } # priors for autocor parameters prior_ac <- function(bterms, def_scale_prior, ...) 
{ prior <- empty_prior() acef <- tidy_acef(bterms) if (!NROW(acef)) { return(prior) } px <- check_prefix(bterms) if (has_ac_class(acef, "arma")) { acef_arma <- subset2(acef, class = "arma") if (acef_arma$p > 0) { prior <- prior + brmsprior(class = "ar", ls = px) } if (acef_arma$q > 0) { prior <- prior + brmsprior(class = "ma", ls = px) } } if (has_ac_class(acef, "cosy")) { prior <- prior + brmsprior(class = "cosy", ls = px) } if (has_ac_latent_residuals(bterms)) { prior <- prior + brmsprior(def_scale_prior, class = "sderr", ls = px) } if (has_ac_class(acef, "sar")) { acef_sar <- subset2(acef, class = "sar") if (acef_sar$type == "lag") { prior <- prior + brmsprior(class = "lagsar", ls = px) } if (acef_sar$type == "error") { prior <- prior + brmsprior(class = "errorsar", ls = px) } } if (has_ac_class(acef, "car")) { acef_car <- subset2(acef, class = "car") prior <- prior + brmsprior(def_scale_prior, class = "sdcar", ls = px) if (acef_car$type %in% "escar") { prior <- prior + brmsprior(class = "car", ls = px) } else if (acef_car$type %in% "bym2") { prior <- prior + brmsprior("beta(1, 1)", class = "rhocar", ls = px) } } prior } # default priors for distributional parameters def_dprior <- function(x, dpar, data = NULL) { stopifnot(is.brmsterms(x)) dpar <- as_one_character(dpar) resp <- usc(x$resp) dpar_class <- dpar_class(dpar, family = x) link <- x$dpars[[dpar]]$family$link if (is.null(link)) { link <- "identity" } # ensures reasonable scaling in def_scale_prior x$family$link <- link if (link == "identity") { # dpar is estimated or predicted on the linear scale out <- switch(dpar_class, "", mu = def_scale_prior(x, data, center = FALSE, dpar = dpar), sigma = def_scale_prior(x, data), shape = "gamma(0.01, 0.01)", nu = "gamma(2, 0.1)", phi = "gamma(0.01, 0.01)", kappa = "gamma(2, 0.01)", beta = "gamma(1, 0.1)", zi = "beta(1, 1)", hu = "beta(1, 1)", zoi = "beta(1, 1)", coi = "beta(1, 1)", bs = "gamma(1, 1)", ndt = glue("uniform(0, min_Y{resp})"), bias = "beta(1, 1)", quantile = "beta(1, 1)", xi = "normal(0, 2.5)", alpha = "normal(0, 4)", disc = "lognormal(0, 1)", theta = "logistic(0, 1)" ) } else { # except for 'mu' all parameters only support one link other than identity out <- switch(dpar_class, "", mu = def_scale_prior(x, data, center = FALSE, dpar = dpar), sigma = def_scale_prior(x, data), shape = "student_t(3, 0, 2.5)", nu = "normal(2.7, 0.8)", phi = "student_t(3, 0, 2.5)", kappa = "normal(5.0, 0.8)", beta = "normal(1.7, 1.3)", zi = "logistic(0, 1)", hu = "logistic(0, 1)", zoi = "logistic(0, 1)", coi = "logistic(0, 1)", bs = "normal(-0.6, 1.3)", bias = "logistic(0, 1)", quantile = "logistic(0, 1)", xi = "normal(0, 4)", alpha = "normal(0, 4)", disc = "normal(0, 1)" ) } out } # default priors for scale/SD parameters def_scale_prior <- function(x, data, ...) { UseMethod("def_scale_prior") } #' @export def_scale_prior.mvbrmsterms <- function(x, data, ...) { out <- ulapply(x$terms, def_scale_prior, data = data, ...) names(out) <- x$responses out } # @param center Should the prior be centered around zero? # If FALSE, the prior location is computed based on Y. #' @export def_scale_prior.brmsterms <- function(x, data, center = TRUE, df = 3, location = 0, scale = 2.5, dpar = NULL, ...) 
{ y <- unname(model.response(model.frame(x$respform, data))) link <- x$family$link if (has_logscale(x$family)) { link <- "log" } tlinks <- c("identity", "log", "inverse", "sqrt", "1/mu^2") if (link %in% tlinks && !is_like_factor(y) && !conv_cats_dpars(x)) { if (link %in% c("log", "inverse", "1/mu^2")) { # avoid Inf in link(y) y <- ifelse(y == 0, y + 0.1, y) } y_link <- SW(link(y, link = link)) scale_y <- round(mad(y_link), 1) if (is.finite(scale_y)) { scale <- max(scale, scale_y) } if (!center) { location_y <- round(median(y_link), 1) if (is.finite(location_y)) { location <- location_y } # offsets may render default intercept priors not sensible dpar <- as_one_character(dpar) offset <- unname(unlist(data_offset(x$dpars[[dpar]], data))) if (length(offset)) { mean_offset <- mean(offset) if (is.finite(mean_offset)) { location <- location - mean_offset } } } } paste0("student_t(", sargs(df, location, scale), ")") } #' Validate Prior for \pkg{brms} Models #' #' Validate priors supplied by the user. Return a complete #' set of priors for the given model, including default priors. #' #' @inheritParams get_prior #' @inheritParams brm #' #' @return An object of class \code{brmsprior}. #' #' @seealso \code{\link{get_prior}}, \code{\link{set_prior}}. #' #' @examples #' prior1 <- prior(normal(0,10), class = b) + #' prior(cauchy(0,2), class = sd) #' validate_prior(prior1, count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' #' @export validate_prior <- function(prior, formula, data, family = gaussian(), sample_prior = "no", data2 = NULL, knots = NULL, ...) { formula <- validate_formula(formula, data = data, family = family) bterms <- brmsterms(formula) data2 <- validate_data2(data2, bterms = bterms) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots ) .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior, ... ) } # internal work function of 'validate_prior' .validate_prior <- function(prior, bterms, data, sample_prior, require_nlpar_prior = TRUE, ...) { sample_prior <- validate_sample_prior(sample_prior) require_nlpar_prior <- as_one_logical(require_nlpar_prior) all_priors <- .get_prior(bterms, data, internal = TRUE) if (is.null(prior)) { prior <- all_priors } else if (!is.brmsprior(prior)) { stop2("Argument 'prior' must be a 'brmsprior' object.") } # when updating existing priors, invalid priors should be allowed allow_invalid_prior <- isTRUE(attr(prior, "allow_invalid_prior")) # temporarily exclude priors that should not be checked no_checks <- !nzchar(prior$class) prior_no_checks <- prior[no_checks, ] prior <- prior[!no_checks, ] # check for duplicated priors prior$class <- rename( prior$class, c("^cor$", "^rescor$", "^corme$"), c("L", "Lrescor", "Lme"), fixed = FALSE ) if (any(duplicated(prior))) { stop2("Duplicated prior specifications are not allowed.") } # check for invalid priors # it is good to let the user know beforehand that some of their priors # were invalid in the model to avoid unnecessary refits if (nrow(prior)) { valid_ids <- which(duplicated(rbind(all_priors, prior))) invalid <- !seq_rows(prior) %in% (valid_ids - nrow(all_priors)) if (any(invalid) && !allow_invalid_prior) { stop2( "The following priors do not correspond ", "to any model parameter: \n", collapse(.print_prior(prior[invalid, ]), "\n"), "Function 'get_prior' might be helpful to you." 
) } prior <- prior[!invalid, ] } prior$prior <- sub("^(lkj|lkj_corr)\\(", "lkj_corr_cholesky(", prior$prior) check_prior_content(prior) # merge user-specified priors with default priors prior$new <- rep(TRUE, nrow(prior)) all_priors$new <- rep(FALSE, nrow(all_priors)) prior <- c(all_priors, prior, replace = TRUE) # don't require priors on nlpars if some priors are not checked (#1124) require_nlpar_prior <- require_nlpar_prior && !any(no_checks) prior <- validate_prior_special( prior, bterms = bterms, data = data, require_nlpar_prior = require_nlpar_prior, ... ) prior <- prior[with(prior, order(class, group, resp, dpar, nlpar, coef)), ] # check and warn about valid but unused priors for (i in which(nzchar(prior$prior) & !nzchar(prior$coef))) { ls <- prior[i, c("class", "group", "resp", "dpar", "nlpar")] class(ls) <- "data.frame" prior_sub_coef <- subset2(prior, ls = ls) prior_sub_coef <- prior_sub_coef[nzchar(prior_sub_coef$coef), ] if (nrow(prior_sub_coef) && all(nzchar(prior_sub_coef$prior))) { warning2( "The global prior '", prior$prior[i], "' of class '", prior$class[i], "' will not be used in the model as all related coefficients have ", "individual priors already. If you did not set those ", "priors yourself, then maybe brms has assigned default priors. ", "See ?set_prior and ?get_prior for more details." ) } } prior <- prior + prior_no_checks rownames(prior) <- NULL attr(prior, "sample_prior") <- sample_prior if (is_verbose()) { # show remaining default priors added to the model def_prior <- prepare_print_prior(prior) def_prior <- subset2(def_prior, source = "default") if (nrow(def_prior)) { message("The following priors were automatically added to the model:") print(def_prior, show_df = TRUE) } } prior } # try to check if prior distributions are reasonable # @param prior A brmsprior object check_prior_content <- function(prior) { if (!is.brmsprior(prior)) { return(invisible(TRUE)) } if (nrow(prior)) { lb_priors <- c( "lognormal", "chi_square", "inv_chi_square", "scaled_inv_chi_square", "exponential", "gamma", "inv_gamma", "weibull", "frechet", "rayleigh", "pareto", "pareto_type_2" ) lb_priors_reg <- paste0("^(", paste0(lb_priors, collapse = "|"), ")") ulb_priors <- c("beta", "uniform", "von_mises") ulb_priors_reg <- paste0("^(", paste0(ulb_priors, collapse = "|"), ")") nb_pars <- c("b", "alpha", "xi") lb_pars <- c( "sigma", "shape", "nu", "phi", "kappa", "beta", "bs", "disc", "sdcar", "sigmaLL", "sd", "sds", "sdgp", "lscale" ) cormat_pars <- c("cor", "rescor", "corme", "L", "Lrescor", "Lme") lb_warning <- ub_warning <- "" for (i in seq_rows(prior)) { msg_prior <- .print_prior(prior[i, , drop = FALSE]) has_lb_prior <- grepl(lb_priors_reg, prior$prior[i]) has_ulb_prior <- grepl(ulb_priors_reg, prior$prior[i]) # priors with nchar(coef) inherit their boundaries j <- which(with(prior, class == class[i] & group == group[i] & nlpar == nlpar[i] & !nzchar(coef) )) bound <- if (length(j)) prior$bound[j] else "" has_lb <- grepl("lower", bound) has_ub <- grepl("upper", bound) if (prior$class[i] %in% nb_pars) { if ((has_lb_prior || has_ulb_prior) && !has_lb) { lb_warning <- paste0(lb_warning, msg_prior, "\n") } if (has_ulb_prior && !has_ub) { ub_warning <- paste0(ub_warning, msg_prior, "\n") } } else if (prior$class[i] %in% lb_pars) { if (has_ulb_prior && !has_ub) { ub_warning <- paste0(ub_warning, msg_prior, "\n") } } else if (prior$class[i] %in% cormat_pars) { regex <- "^((lkj)|(constant))" if (nzchar(prior$prior[i]) && !grepl(regex, prior$prior[i])) { stop2( "The only supported prior for 
correlation matrices is ", "the 'lkj' prior. See help(set_prior) for more details." ) } } else if (prior$class[i] %in% c("simo", "theta", "sbhaz")) { regex <- "^((dirichlet)|(constant))\\(" if (nchar(prior$prior[i]) && !grepl(regex, prior$prior[i])) { stop2( "Currently 'dirichlet' is the only valid prior for ", "simplex parameters. See help(set_prior) for more details." ) } } } if (nchar(lb_warning)) { warning2( "It appears as if you have specified a lower bounded ", "prior on a parameter that has no natural lower bound.", "\nIf this is really what you want, please specify ", "argument 'lb' of 'set_prior' appropriately.", "\nWarning occurred for prior \n", lb_warning ) } if (nchar(ub_warning)) { warning2( "It appears as if you have specified an upper bounded ", "prior on a parameter that has no natural upper bound.", "\nIf this is really what you want, please specify ", "argument 'ub' of 'set_prior' appropriately.", "\nWarning occurred for prior \n", ub_warning ) } } invisible(TRUE) } # prepare special priors for use in Stan # required for priors that are not natively supported by Stan validate_prior_special <- function(x, ...) { UseMethod("validate_prior_special") } #' @export validate_prior_special.default <- function(x, prior = empty_prior(), ...) { prior } #' @export validate_prior_special.brmsprior <- function(x, bterms, ...) { if (!NROW(x)) { return(x) } if (is.null(x$new)) { x$new <- TRUE } x$remove <- FALSE x <- validate_prior_special(bterms, prior = x, ...) x <- x[!x$remove, ] x$new <- x$remove <- NULL x } #' @export validate_prior_special.mvbrmsterms <- function(x, prior = NULL, ...) { for (cl in c("b", "Intercept")) { # copy over the global population-level priors in MV models gi <- which(find_rows(prior, class = cl, coef = "", resp = "")) prior$remove[gi] <- TRUE if (!any(nzchar(prior$prior[gi]))) { next } # allowing global priors in multivariate models implies conceptual problems # in the specification of default priors as it becomes unclear on which # prior level they should be defined warning2("Specifying global priors for regression coefficients in ", "multivariate models is deprecated. Please specify priors ", "separately for each response variable.") for (r in x$responses) { rows <- which(find_rows(prior, class = cl, coef = "", resp = r)) for (ri in rows) { if (isTRUE(!prior$new[ri] || !nzchar(prior$prior[ri]))) { prior$prior[ri] <- prior$prior[gi] } } } } for (i in seq_along(x$terms)) { prior <- validate_prior_special(x$terms[[i]], prior = prior, ...) } prior } #' @export validate_prior_special.brmsterms <- function(x, data, prior = NULL, ...) { data <- subset_data(data, x) if (is.null(prior)) { prior <- empty_prior() } if (conv_cats_dpars(x$family)) { for (cl in c("b", "Intercept")) { gi <- which(find_rows( prior, class = cl, coef = "", dpar = "", nlpar = "", resp = x$resp )) prior$remove[gi] <- TRUE if (!any(nzchar(prior$prior[gi]))) { next } # allowing global priors in categorical models implies conceptual problems # in the specification of default priors as it becomes unclear on which # prior level they should be defined warning2("Specifying global priors for regression coefficients in ", "categorical models is deprecated. 
Please specify priors ", "separately for each response category.") for (dp in names(x$dpars)) { rows <- which(find_rows( prior, class = cl, coef = "", dpar = dp, nlpar = "", resp = x$resp )) for (dpi in rows) { if (isTRUE(!prior$new[dpi] || !nzchar(prior$prior[dpi]))) { prior$prior[dpi] <- prior$prior[gi] } } } } } simple_sigma <- simple_sigma(x) for (dp in names(x$dpars)) { allow_autoscale <- dp == "mu" && simple_sigma prior <- validate_prior_special( x$dpars[[dp]], prior = prior, data = data, allow_autoscale = allow_autoscale, ... ) } for (nlp in names(x$nlpars)) { prior <- validate_prior_special( x$nlpars[[nlp]], prior = prior, data = data, allow_autoscale = simple_sigma, ... ) } prior } #' @export validate_prior_special.btnl <- function(x, prior, ...) { prior } # prepare special priors that cannot be passed to Stan as is # @param allow_autoscale allow autoscaling by parameter sigma? # @param require_nlpar_prior require priors on coefficients of nlpars? # @return a possibly updated brmsprior object with additional attributes #' @export validate_prior_special.btl <- function(x, prior, data, allow_autoscale = TRUE, require_nlpar_prior = TRUE, ...) { allow_autoscale <- as_one_logical(allow_autoscale) require_nlpar_prior <- as_one_logical(require_nlpar_prior) px <- check_prefix(x) if (is_nlpar(x) && no_center(x$fe)) { nlp_prior <- subset2(prior, ls = px) if (!any(nzchar(nlp_prior$prior)) && require_nlpar_prior) { stop2( "Priors on population-level coefficients are required in ", "non-linear models, but none were found for parameter ", "'", px$nlpar, "'. See help(set_prior) for more details." ) } } # prepare special priors such as horseshoe or lasso special <- list() b_index <- which(find_rows(prior, class = "b", coef = "", ls = px)) stopifnot(length(b_index) <= 1L) if (length(b_index)) { b_prior <- prior$prior[b_index] if (any(is_special_prior(b_prior))) { # horseshoe prior for population-level parameters if (any(nzchar(prior[b_index, "bound"]))) { stop2("Setting boundaries on coefficients is not ", "allowed when using the special priors.") } if (is.formula(x[["cs"]])) { stop2("Special priors are not yet allowed ", "in models with category-specific effects.") } b_coef_indices <- which( find_rows(prior, class = "b", ls = px) & !find_rows(prior, coef = c("", "Intercept")) ) if (any(nchar(prior$prior[b_coef_indices]))) { stop2( "Defining separate priors for single coefficients is not ", "allowed when using special priors for the whole ", "set of coefficients (except for the Intercept)." 
) } if (is_special_prior(b_prior, "horseshoe")) { special$horseshoe <- attributes(eval2(b_prior)) special$horseshoe$autoscale <- isTRUE(special$horseshoe$autoscale) && allow_autoscale } else if (is_special_prior(b_prior, "R2D2")) { special$R2D2 <- attributes(eval2(b_prior)) special$R2D2$autoscale <- isTRUE(special$R2D2$autoscale) && allow_autoscale } else if (is_special_prior(b_prior, "lasso")) { # the parameterization via double_exponential appears to be more # efficient than an indirect parameterization via normal and # exponential distributions; tested on 2017-06-09 p <- usc(combine_prefix(px)) lasso_scale <- paste0("lasso_scale", p, " * lasso_inv_lambda", p) lasso_prior <- paste0("double_exponential(0, ", lasso_scale, ")") prior$prior[b_index] <- lasso_prior special$lasso <- attributes(eval2(b_prior)) } } } prefix <- combine_prefix(px, keep_mu = TRUE) attributes(prior)$special[[prefix]] <- special prior } # validate argument 'sample_prior' validate_sample_prior <- function(sample_prior) { options <- c("no", "yes", "only") if (is.null(sample_prior)) { sample_prior <- "no" } if (!is.character(sample_prior)) { sample_prior <- as_one_logical(sample_prior) sample_prior <- if (sample_prior) "yes" else "no" } match.arg(sample_prior, options) } # get stored 'sample_prior' argument get_sample_prior <- function(prior) { validate_sample_prior(attr(prior, "sample_prior", TRUE)) } # extract prior boundaries of a parameter # @param prior a brmsprior object # @param class,coef,group,px passed to 'subset2' get_bound <- function(prior, class = "b", coef = "", group = "", px = list()) { stopifnot(is.brmsprior(prior)) class <- as_one_character(class) if (!length(coef)) coef <- "" if (!length(group)) group <- "" bound <- subset2(prior, ls = c(nlist(class, coef, group), px))$bound if (!length(bound)) bound <- "" if (length(bound) != 1L) { stop("Extracting parameter boundaries failed. Please report a bug.") } bound } # create data.frames containing prior information brmsprior <- function(prior = "", class = "", coef = "", group = "", resp = "", dpar = "", nlpar = "", bound = "", source = "", ls = list()) { if (length(ls)) { if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } names <- all_cols_prior() if (!all(names(ls) %in% names)) { stop("Names of 'ls' must some of ", collapse_comma(names)) } for (v in names(ls)) { assign(v, ls[[v]]) } } out <- data.frame( prior, class, coef, group, resp, dpar, nlpar, bound, source, stringsAsFactors = FALSE ) class(out) <- c("brmsprior", "data.frame") out } #' @describeIn set_prior Create an empty \code{brmsprior} object. 
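# Editor's note -- an illustrative sketch, not part of the package sources:
# 'brmsprior' objects created by the exported set_prior() are data frames that
# can be combined with the 'c' method or the '+' operator defined later in this
# file; c.brmsprior(..., replace = TRUE) lets later entries overwrite duplicates.
bp <- set_prior("normal(0, 5)", class = "b") +
  set_prior("cauchy(0, 2)", class = "sd")
is.brmsprior(bp)  # TRUE
bp  # printed via print.brmsprior()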
#' @export empty_prior <- function() { char0 <- character(0) brmsprior( prior = char0, source = char0, class = char0, coef = char0, group = char0, resp = char0, dpar = char0, nlpar = char0, bound = char0 ) } # natural upper and lower bounds for priors # @param a named list with elements 'lb and 'ub' prior_bounds <- function(prior) { switch(prior, lognormal = list(lb = 0, ub = Inf), chi_square = list(lb = 0, ub = Inf), inv_chi_square = list(lb = 0, ub = Inf), scaled_inv_chi_square = list(lb = 0, ub = Inf), exponential = list(lb = 0, ub = Inf), gamma = list(lb = 0, ub = Inf), inv_gamma = list(lb = 0, ub = Inf), weibull = list(lb = 0, ub = Inf), frechet = list(lb = 0, ub = Inf), rayleigh = list(lb = 0, ub = Inf), pareto = list(lb = 0, ub = Inf), pareto_type_2 = list(lb = 0, ub = Inf), beta = list(lb = 0, ub = 1), von_mises = list(lb = -pi, ub = pi), list(lb = -Inf, ub = Inf) ) } # all columns of brmsprior objects all_cols_prior <- function() { c("prior", "class", "coef", "group", "resp", "dpar", "nlpar", "bound", "source") } # relevant columns for duplication checks in brmsprior objects rcols_prior <- function() { c("class", "coef", "group", "resp", "dpar", "nlpar") } # upper and lower bounds for parameter classes # @param par name of a distributional parameter # @param bound optional Stan code of boundaries to extract values from # @param resp optional name of the response variable # @return A named list with elements 'lb and 'ub' par_bounds <- function(par, bound = "", resp = "") { resp <- usc(resp) out <- switch(par, sigma = list(lb = 0, ub = Inf), shape = list(lb = 0, ub = Inf), nu = list(lb = 1, ub = Inf), phi = list(lb = 0, ub = Inf), kappa = list(lb = 0, ub = Inf), beta = list(lb = 0, ub = Inf), zi = list(lb = 0, ub = 1), hu = list(lb = 0, ub = 1), zoi = list(lb = 0, ub = 1), coi = list(lb = 0, ub = 1), bs = list(lb = 0, ub = Inf), ndt = list(lb = 0, ub = glue("min_Y{resp}")), bias = list(lb = 0, ub = 1), disc = list(lb = 0, ub = Inf), quantile = list(lb = 0, ub = 1), ar = list(lb = -1, ub = 1), ma = list(lb = -1, ub = 1), lagsar = list(lb = 0, ub = 1), errorsar = list(lb = 0, ub = 1), car = list(lb = 0, ub = 1), sdcar = list(lb = 0, ub = Inf), rhocar = list(lb = 0, ub = 1), sigmaLL = list(lb = 0, ub = Inf), sd = list(lb = 0, ub = Inf), sds = list(lb = 0, ub = Inf), sdgp = list(lb = 0, ub = Inf), lscale = list(lb = 0, ub = Inf), list(lb = -Inf, ub = Inf) ) if (isTRUE(nzchar(bound))) { opt_lb <- get_matches("(<|,)lower=[^,>]+", bound) if (isTRUE(nzchar(opt_lb))) { out$lb <- substr(opt_lb, 8, nchar(opt_lb)) } opt_ub <- get_matches("(<|,)upper=[^,>]+", bound) if (isTRUE(nzchar(opt_ub))) { out$ub <- substr(opt_ub, 8, nchar(opt_ub)) } } out } #' Checks if argument is a \code{brmsprior} object #' #' @param x An \R object #' #' @export is.brmsprior <- function(x) { inherits(x, "brmsprior") } #' Print method for \code{brmsprior} objects #' #' @param x An object of class \code{brmsprior}. #' @param show_df Logical; Print priors as a single #' \code{data.frame} (\code{TRUE}) or as a sequence of #' sampling statements (\code{FALSE})? #' @param ... Currently ignored. #' #' @export print.brmsprior <- function(x, show_df = NULL, ...) { if (is.null(show_df)) { show_df <- nrow(x) > 1L } show_df <- as_one_logical(show_df) y <- prepare_print_prior(x) if (show_df) { print.data.frame(y, row.names = FALSE, ...) 
} else { cat(collapse(.print_prior(y), "\n")) } invisible(x) } # prepare pretty printing of brmsprior objects prepare_print_prior <- function(x) { stopifnot(is.brmsprior(x)) x$source[!nzchar(x$source)] <- "(unknown)" # column names to vectorize over cols <- c("group", "nlpar", "dpar", "resp", "class") empty_strings <- rep("", 4) for (i in which(!nzchar(x$prior))) { ls <- x[i, cols] ls <- rbind(ls, c(empty_strings, ls$class)) ls <- as.list(ls) sub_prior <- subset2(x, ls = ls) base_prior <- stan_base_prior(sub_prior) if (nzchar(base_prior)) { x$prior[i] <- base_prior x$source[i] <- "(vectorized)" } else { x$prior[i] <- "(flat)" } } x } # prepare text for print.brmsprior .print_prior <- function(x) { group <- usc(x$group) resp <- usc(x$resp) dpar <- usc(x$dpar) nlpar <- usc(x$nlpar) coef <- usc(x$coef) if (any(nzchar(c(resp, dpar, nlpar, coef)))) { group <- usc(group, "suffix") } bound <- ifelse(nzchar(x$bound), paste0(x$bound, " "), "") tilde <- ifelse(nzchar(x$class) | nzchar(group) | nzchar(coef), " ~ ", "") prior <- ifelse(nzchar(x$prior), x$prior, "(flat)") paste0(bound, x$class, group, resp, dpar, nlpar, coef, tilde, prior) } # combine multiple brmsprior objects into one brmsprior #' @export c.brmsprior <- function(x, ..., replace = FALSE) { dots <- list(...) if (all(sapply(dots, is.brmsprior))) { replace <- as_one_logical(replace) # don't use 'c()' here to avoid creating a recursion out <- do_call(rbind, list(x, ...)) if (replace) { # update duplicated priors out <- unique(out, fromLast = TRUE) } } else { if (length(dots)) { stop2("Cannot add '", class(dots[[1]])[1], "' objects to the prior.") } out <- c(as.data.frame(x)) } out } #' @export "+.brmsprior" <- function(e1, e2) { if (is.null(e2)) { return(e1) } if (!is.brmsprior(e2)) { stop2("Cannot add '", class(e2)[1], "' objects to the prior.") } c(e1, e2) } #' @export duplicated.brmsprior <- function(x, incomparables = FALSE, ...) { # compare only specific columns of the brmsprior object duplicated.data.frame(x[, rcols_prior()], incomparables, ...) } # evaluate the dirichlet prior of simplex parameters # avoid name clashing with the dirichlet family # @param prior a character vector of the form 'dirichlet(...)' # @param len desired length of the prior concentration vector # @param env environment in which to search for data # @return a numeric vector of prior concentration values eval_dirichlet <- function(prior, len = NULL, env = NULL) { dirichlet <- function(...) { out <- try(as.numeric(c(...))) if (is(out, "try-error")) { stop2("Something went wrong. Did you forget to store ", "auxiliary data in the 'data2' argument?") } if (anyNA(out) || any(out <= 0)) { stop2("The dirichlet prior expects positive values.") } if (!is.null(len)) { if (length(out) == 1L) { out <- rep(out, len) } if (length(out) != len) { stop2("Invalid Dirichlet prior. Expected input of length ", len, ".") } } return(out) } prior <- as_one_character(prior) if (!nzchar(prior)) { prior <- "dirichlet(1)" } eval2(prior, envir = env, enclos = environment()) } #' Regularized horseshoe priors in \pkg{brms} #' #' Function used to set up regularized horseshoe priors and related #' hierarchical shrinkage priors for population-level effects in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up #' the model. #' #' @param df Degrees of freedom of student-t prior of the #' local shrinkage parameters. Defaults to \code{1}. #' @param scale_global Scale of the student-t prior of the global shrinkage #' parameter. Defaults to \code{1}. 
#' In linear models, \code{scale_global} will internally be #' multiplied by the residual standard deviation parameter \code{sigma}. #' @param df_global Degrees of freedom of student-t prior of the #' global shrinkage parameter. Defaults to \code{1}. If \code{df_global} #' is greater than \code{1}, the shape of the prior will no longer resemble #' a horseshoe and it may be more appropriately called a hierarchical #' shrinkage prior in this case. #' @param scale_slab Scale of the student-t prior of the regularization #' parameter. Defaults to \code{2}. The original unregularized horseshoe #' prior is obtained by setting \code{scale_slab} to infinity, which #' we can approximate in practice by setting it to a very large real value. #' @param df_slab Degrees of freedom of the student-t prior of #' the regularization parameter. Defaults to \code{4}. #' @param par_ratio Ratio of the expected number of non-zero coefficients #' to the expected number of zero coefficients. If specified, #' \code{scale_global} is ignored and internally computed as #' \code{par_ratio / sqrt(N)}, where \code{N} is the total number #' of observations in the data. #' @param autoscale Logical; indicating whether the horseshoe #' prior should be scaled using the residual standard deviation #' \code{sigma} if possible and sensible (defaults to \code{TRUE}). #' Autoscaling is not applied for distributional parameters or #' when the model does not contain the parameter \code{sigma}. #' #' @return A character string obtained by \code{match.call()} with #' additional arguments. #' #' @details #' The horseshoe prior is a special shrinkage prior initially proposed by #' Carvalho et al. (2009). #' It is symmetric around zero with fat tails and an infinitely large spike #' at zero. This makes it ideal for sparse models that have #' many regression coefficients, although only a minority of them are non-zero. #' The horseshoe prior can be applied to all population-level effects at once #' (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. #' The \code{1} implies that the student-t prior of the local shrinkage #' parameters has 1 degree of freedom. This may, however, lead to an #' increased number of divergent transitions in \pkg{Stan}. #' Accordingly, increasing the degrees of freedom to slightly higher values #' (e.g., \code{3}) may often be a better option, although the prior #' no longer resembles a horseshoe in this case. #' Further, the scale of the global shrinkage parameter plays an important role #' in the amount of shrinkage applied. It defaults to \code{1}, #' but this may result in too little shrinkage (Piironen & Vehtari, 2016). #' It is thus possible to change the scale using argument \code{scale_global} #' of the horseshoe prior, for instance \code{horseshoe(1, scale_global = 0.5)}. #' In linear models, \code{scale_global} will internally be multiplied by the #' residual standard deviation parameter \code{sigma}. See Piironen and #' Vehtari (2016) for recommendations on how to properly set the global scale. #' The degrees of freedom of the global shrinkage prior may also be #' adjusted via argument \code{df_global}. #' Piironen and Vehtari (2017) recommend specifying the ratio of the #' expected number of non-zero coefficients to the expected number of zero #' coefficients (\code{par_ratio}) rather than \code{scale_global} directly. #' As proposed by Piironen and Vehtari (2017), an additional regularization #' is applied that only affects non-zero coefficients.
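# Editor's note -- an illustrative sketch with simulated data ('dat', 'y', and
# 'x1' to 'x3' are hypothetical, not from the package): a regularized horseshoe
# prior on all population-level coefficients, with 'adapt_delta' increased as
# recommended in the details above.
set.seed(1)
dat <- data.frame(y = rnorm(100), x1 = rnorm(100), x2 = rnorm(100), x3 = rnorm(100))
fit <- brm(y ~ x1 + x2 + x3, data = dat,
           prior = set_prior(horseshoe(df = 3, par_ratio = 0.1)),
           control = list(adapt_delta = 0.99))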
The amount of #' regularization can be controlled via \code{scale_slab} and \code{df_slab}. #' To make sure that shrinkage can equally affect all coefficients, #' predictors should be on the same scale. #' Generally, models with horseshoe priors are more likely than other models #' to have divergent transitions, so increasing \code{adapt_delta} #' from \code{0.8} to values closer to \code{1} will often be necessary. #' See the documentation of \code{\link{brm}} for instructions #' on how to increase \code{adapt_delta}. #' #' @references #' Carvalho, C. M., Polson, N. G., & Scott, J. G. (2009). #' Handling sparsity via the horseshoe. #' In International Conference on Artificial Intelligence and Statistics (pp. 73-80). #' #' Piironen, J. & Vehtari, A. (2016). On the Hyperprior Choice for the Global #' Shrinkage Parameter in the Horseshoe Prior. #' \url{https://arxiv.org/pdf/1610.05559v1.pdf} #' #' Piironen, J., and Vehtari, A. (2017). Sparsity information and regularization #' in the horseshoe and other shrinkage priors. #' \url{https://arxiv.org/abs/1707.01694} #' #' @seealso \code{\link{set_prior}} #' #' @examples #' set_prior(horseshoe(df = 3, par_ratio = 0.1)) #' #' @export horseshoe <- function(df = 1, scale_global = 1, df_global = 1, scale_slab = 2, df_slab = 4, par_ratio = NULL, autoscale = TRUE) { out <- deparse(match.call(), width.cutoff = 500L) df <- as.numeric(df) df_global <- as.numeric(df_global) df_slab <- as.numeric(df_slab) scale_global <- as.numeric(scale_global) scale_slab <- as.numeric(scale_slab) if (!isTRUE(df > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the local priors must be a single positive number.") } if (!isTRUE(df_global > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the global prior must be a single positive number.") } if (!isTRUE(scale_global > 0)) { stop2("Invalid horseshoe prior: Scale of the global ", "prior must be a single positive number.") } if (!isTRUE(df_slab > 0)) { stop2("Invalid horseshoe prior: Degrees of freedom of ", "the slab part must be a single positive number.") } if (!isTRUE(scale_slab > 0)) { stop2("Invalid horseshoe prior: Scale of the slab ", "part must be a single positive number.") } if (!is.null(par_ratio)) { par_ratio <- as.numeric(par_ratio) if (!isTRUE(par_ratio > 0)) { stop2("Argument 'par_ratio' must be greater than 0.") } } autoscale <- as_one_logical(autoscale) att <- nlist( df, df_global, df_slab, scale_global, scale_slab, par_ratio, autoscale ) attributes(out)[names(att)] <- att out } #' R2-D2 Priors in \pkg{brms} #' #' Function used to set up R2D2 priors for population-level effects in #' \pkg{brms}. The function does not evaluate its arguments -- it exists purely #' to help set up the model. #' #' @param mean_R2 Mean of the Beta prior on the coefficient of determination R^2. #' @param prec_R2 Precision of the Beta prior on the coefficient of determination R^2. #' @param cons_D2 Concentration vector of the Dirichlet prior on the variance #' decomposition parameters. #' @param autoscale Logical; indicating whether the R2D2 #' prior should be scaled using the residual standard deviation #' \code{sigma} if possible and sensible (defaults to \code{TRUE}). #' Autoscaling is not applied for distributional parameters or #' when the model does not contain the parameter \code{sigma}. #' #' @references #' Zhang, Y. D., Naughton, B. P., Bondell, H. D., & Reich, B. J. (2020). #' Bayesian regression using a prior on the model fit: The R2-D2 shrinkage #' prior.
Journal of the American Statistical Association. #' \url{https://arxiv.org/pdf/1609.00046.pdf} #' #' @seealso \code{\link{set_prior}} #' #' @examples #' set_prior(R2D2(mean_R2 = 0.8, prec_R2 = 10)) #' #' @export R2D2 <- function(mean_R2 = 0.5, prec_R2 = 2, cons_D2 = 1, autoscale = TRUE) { out <- deparse(match.call(), width.cutoff = 500L) mean_R2 <- as_one_numeric(mean_R2) prec_R2 <- as_one_numeric(prec_R2) cons_D2 <- as.numeric(cons_D2) if (!(mean_R2 > 0 && mean_R2 < 1)) { stop2("Invalid R2D2 prior: Mean of the R2 prior ", "must be a single number in (0, 1).") } if (prec_R2 <= 0) { stop2("Invalid R2D2 prior: Precision of the R2 prior ", "must be a single positive number.") } if (any(cons_D2 <= 0)) { stop2("Invalid R2D2 prior: Concentration of the D2 prior ", "must be a vector of positive numbers.") } autoscale <- as_one_logical(autoscale) att <- nlist(mean_R2, prec_R2, cons_D2, autoscale) attributes(out)[names(att)] <- att out } #' Set up a lasso prior in \pkg{brms} #' #' Function used to set up a lasso prior for population-level effects #' in \pkg{brms}. The function does not evaluate its arguments -- #' it exists purely to help set up the model. #' #' @param df Degrees of freedom of the chi-square prior of the inverse tuning #' parameter. Defaults to \code{1}. #' @param scale Scale of the lasso prior. Defaults to \code{1}. #' #' @return A character string obtained by \code{match.call()} with #' additional arguments. #' #' @details #' The lasso prior is the Bayesian equivalent of the LASSO method for performing #' variable selection (Park & Casella, 2008). #' With this prior, independent Laplace (i.e., double exponential) priors #' are placed on the population-level effects. #' The scale of the Laplace priors depends on a tuning parameter #' that controls the amount of shrinkage. In \pkg{brms}, the inverse #' of the tuning parameter is used so that smaller values imply #' more shrinkage. The inverse tuning parameter has a chi-square distribution #' with degrees of freedom controlled via argument \code{df} #' of function \code{lasso} (defaults to \code{1}). For instance, #' one can specify a lasso prior using \code{set_prior("lasso(1)")}. #' To make sure that shrinkage can equally affect all coefficients, #' predictors should be on the same scale. #' If you do not want to standardize all variables, #' you can adjust the general scale of the lasso prior via argument #' \code{scale}, for instance, \code{lasso(1, scale = 10)}. #' #' @references #' Park, T., & Casella, G. (2008). The Bayesian Lasso. Journal of the American #' Statistical Association, 103(482), 681-686.
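# Editor's note -- an illustrative sketch, not part of the package sources:
# like horseshoe() and R2D2(), lasso() returns the deparsed call as a character
# string with its validated arguments attached as attributes, which brms later
# recovers via attributes(eval2(...)) when preparing special priors (see
# validate_prior_special.btl above).
p <- lasso(df = 1, scale = 10)
p                     # "lasso(df = 1, scale = 10)" plus attributes
attributes(p)$df      # 1
attributes(p)$scale   # 10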
#' #' @seealso \code{\link{set_prior}} #' #' @examples #' set_prior(lasso(df = 1, scale = 10)) #' #' @export lasso <- function(df = 1, scale = 1) { out <- deparse(match.call(), width.cutoff = 500L) df <- as.numeric(df) scale <- as.numeric(scale) if (!isTRUE(df > 0)) { stop2("Invalid lasso prior: Degrees of freedom of the shrinkage ", "parameter prior must be a single positive number.") } if (!isTRUE(scale > 0)) { stop2("Invalid lasso prior: Scale of the Laplace ", "priors must be a single positive number.") } att <- nlist(df, scale) attributes(out)[names(att)] <- att out } # check for the usage of special priors # @param prior a character vector of priors # @param target optional special priors to search for # if NULL search for all special priors # @return a logical vector equal to the length of 'prior' is_special_prior <- function(prior, target = NULL) { stopifnot(is.character(prior)) if (is.null(target)) { target <- c("horseshoe", "R2D2", "lasso") } regex <- paste0("^", regex_or(target), "\\(") grepl(regex, prior) } # extract special prior information # @param prior a brmsprior object # @param px object from which the prefix can be extract get_special_prior <- function(prior, px = NULL) { out <- attr(prior, "special") if (!is.null(px)) { prefix <- combine_prefix(px, keep_mu = TRUE) out <- out[[prefix]] } out } # check if parameters should be sampled only from the prior is_prior_only <- function(prior) { is_equal(get_sample_prior(prior), "only") } brms/R/stanvars.R0000644000175000017500000002160414111751666013547 0ustar nileshnilesh#' User-defined variables passed to Stan #' #' Prepare user-defined variables to be passed to one of Stan's #' program blocks. This is primarily useful for defining more complex #' priors, for refitting models without recompilation despite #' changing priors, or for defining custom Stan functions. #' #' @aliases stanvars #' #' @param x An \R object containing data to be passed to Stan. #' Only required if \code{block = 'data'} and ignored otherwise. #' @param name Optional character string providing the desired variable #' name of the object in \code{x}. If \code{NULL} (the default) #' the variable name is directly inferred from \code{x}. #' @param scode Line of Stan code to define the variable #' in Stan language. If \code{block = 'data'}, the #' Stan code is inferred based on the class of \code{x} by default. #' @param block Name of one of Stan's program blocks in #' which the variable should be defined. Can be \code{'data'}, #' \code{'tdata'} (transformed data), \code{'parameters'}, #' \code{'tparameters'} (transformed parameters), \code{'model'}, #' \code{'likelihood'} (part of the model block where the likelihood is given), #' \code{'genquant'} (generated quantities) or \code{'functions'}. #' @param position Name of the position within the block where the #' Stan code should be placed. Currently allowed are \code{'start'} #' (the default) and \code{'end'} of the block. #' @param pll_args Optional Stan code to be put into the header #' of \code{partial_log_lik} functions. This ensures that the variables #' specified in \code{scode} can be used in the likelihood even when #' within-chain parallelization is activated via \code{\link{threading}}. #' #' @return An object of class \code{stanvars}. 
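# Editor's note -- an illustrative sketch, not part of the package sources:
# when block = "data" and 'scode' is omitted, stanvar() infers the Stan
# declaration from the class and dimensions of 'x' (see the inference code
# further below).
sv <- stanvar(diag(2), name = "V")
sv$V$scode   # "matrix[2, 2] V;"
sv <- stanvar(5L, name = "n")
sv$n$scode   # "int n;"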
#' #' @examples #' bprior <- prior(normal(mean_intercept, 10), class = "Intercept") #' stanvars <- stanvar(5, name = "mean_intercept") #' make_stancode(count ~ Trt, epilepsy, prior = bprior, #' stanvars = stanvars) #' #' # define a multi-normal prior with known covariance matrix #' bprior <- prior(multi_normal(M, V), class = "b") #' stanvars <- stanvar(rep(0, 2), "M", scode = " vector[K] M;") + #' stanvar(diag(2), "V", scode = " matrix[K, K] V;") #' make_stancode(count ~ Trt + zBase, epilepsy, #' prior = bprior, stanvars = stanvars) #' #' # define a hierachical prior on the regression coefficients #' bprior <- set_prior("normal(0, tau)", class = "b") + #' set_prior("target += normal_lpdf(tau | 0, 10)", check = FALSE) #' stanvars <- stanvar(scode = "real tau;", #' block = "parameters") #' make_stancode(count ~ Trt + zBase, epilepsy, #' prior = bprior, stanvars = stanvars) #' #' # ensure that 'tau' is passed to the likelihood of a threaded model #' # not necessary for this example but may be necessary in other cases #' stanvars <- stanvar(scode = "real tau;", #' block = "parameters", pll_args = "real tau") #' make_stancode(count ~ Trt + zBase, epilepsy, #' stanvars = stanvars, threads = threading(2)) #' #' @export stanvar <- function(x = NULL, name = NULL, scode = NULL, block = "data", position = "start", pll_args = NULL) { vblocks <- c( "data", "tdata", "parameters", "tparameters", "model", "genquant", "functions", "likelihood" ) block <- match.arg(block, vblocks) vpositions <- c("start", "end") position <- match.arg(position, vpositions) if (block == "data") { if (is.null(x)) { stop2("Argument 'x' is required if block = 'data'.") } if (is.null(name)) { name <- deparse(substitute(x)) } name <- as_one_character(name) if (!is_equal(name, make.names(name)) || grepl("\\.", name)) { stop2("'", limit_chars(name, 30), "' is not ", "a valid variable name in Stan.") } if (is.null(scode)) { # infer scode from x if (is.integer(x)) { if (length(x) == 1L) { scode <- paste0("int ", name) } else { scode <- paste0("int ", name, "[", length(x), "]") } } else if (is.vector(x)) { if (length(x) == 1L) { scode <- paste0("real ", name) } else { scode <- paste0("vector[", length(x), "] ", name) } } else if (is.array(x)) { if (length(dim(x)) == 1L) { scode <- paste0("vector[", length(x), "] ", name) } else if (is.matrix(x)) { scode <- paste0("matrix[", nrow(x), ", ", ncol(x), "] ", name) } } if (is.null(scode)) { stop2( "'stanvar' could not infer the Stan code for an object ", "of class '", class(x), "'. Please specify the Stan code ", "manually via argument 'scode'." 
) } scode <- paste0(scode, ";") } if (is.null(pll_args)) { # infer pll_args from x pll_type <- str_if(block %in% c("data", "tdata"), "data ") if (is.integer(x)) { if (length(x) == 1L) { pll_type <- paste0(pll_type, "int") } else { pll_type <- paste0(pll_type, "int[]") } } else if (is.vector(x)) { if (length(x) == 1L) { pll_type <- paste0(pll_type, "real") } else { pll_type <- paste0(pll_type, "vector") } } else if (is.array(x)) { if (length(dim(x)) == 1L) { pll_type <- paste0(pll_type, "vector") } else if (is.matrix(x)) { pll_type <- paste0(pll_type, "matrix") } } if (!is.null(pll_type)) { pll_args <- paste0(pll_type, " ", name) } else { # don't throw an error because most people will not use threading pll_args <- character(0) } } } else { x <- NULL if (is.null(name)) { name <- "" } name <- as_one_character(name) if (is.null(scode)) { stop2("Argument 'scode' is required if block is not 'data'.") } scode <- as.character(scode) pll_args <- as.character(pll_args) } if (position == "end" && block %in% c("functions", "data")) { stop2("Position '", position, "' is not sensible for block '", block, "'.") } out <- nlist(name, sdata = x, scode, block, position, pll_args) structure(setNames(list(out), name), class = "stanvars") } # take a subset of a stanvars object # @param x a stanvars object # @param ... conditions defining the desired subset subset_stanvars <- function(x, ...) { x <- validate_stanvars(x) structure_not_null(x[find_elements(x, ...)], class = "stanvars") } # collapse Stan code provided in a stanvars object collapse_stanvars <- function(x, block = NULL, position = NULL) { x <- validate_stanvars(x) if (!length(x)) { return(character(0)) } if (!is.null(block)) { x <- subset_stanvars(x, block = block) } if (!is.null(position)) { x <- subset_stanvars(x, position = position) } if (!length(x)) { return("") } collapse(wsp(nsp = 2), ulapply(x, "[[", "scode"), "\n") } # collapse partial lpg-lik code provided in a stanvars object collapse_stanvars_pll_args <- function(x) { x <- validate_stanvars(x) if (!length(x)) { return(character(0)) } out <- ulapply(x, "[[", "pll_args") if (!length(out)) { return("") } collapse(", ", out) } # validate 'stanvars' objects validate_stanvars <- function(x, stan_funs = NULL) { if (is.null(x)) { x <- empty_stanvars() } if (!is.stanvars(x)) { stop2("Argument 'stanvars' is invalid. See ?stanvar for help.") } if (length(stan_funs) > 0) { warning2("Argument 'stan_funs' is deprecated. Please use argument ", "'stanvars' instead. See ?stanvar for more help.") stan_funs <- as_one_character(stan_funs) x <- x + stanvar(scode = stan_funs, block = "functions") } x } # add new data to stanvars # @param x a 'stanvars' object # @param newdata2 a list with new 'data2' objects # @return a 'stanvars' object add_newdata_stanvars <- function(x, newdata2) { stopifnot(is.stanvars(x)) stanvars_data <- subset_stanvars(x, block = "data") for (name in names(stanvars_data)) { if (name %in% names(newdata2)) { x[[name]]$sdata <- newdata2[[name]] } } x } #' @export c.stanvars <- function(x, ...) 
{ dots <- lapply(list(...), validate_stanvars) class(x) <- "list" out <- unlist(c(list(x), dots), recursive = FALSE) svnames <- names(out)[nzchar(names(out))] if (any(duplicated(svnames))) { stop2("Duplicated names in 'stanvars' are not allowed.") } structure(out, class = "stanvars") } #' @export "+.stanvars" <- function(e1, e2) { c(e1, e2) } is.stanvars <- function(x) { inherits(x, "stanvars") } empty_stanvars <- function() { structure(list(), class = "stanvars") } brms/R/brm.R0000644000175000017500000007100414136566200012460 0ustar nileshnilesh#' Fit Bayesian Generalized (Non-)Linear Multivariate Multilevel Models #' #' Fit Bayesian generalized (non-)linear multivariate multilevel models #' using Stan for full Bayesian inference. A wide range of distributions #' and link functions are supported, allowing users to fit -- among others -- #' linear, robust linear, count data, survival, response times, ordinal, #' zero-inflated, hurdle, and even self-defined mixture models all in a #' multilevel context. Further modeling options include non-linear and #' smooth terms, auto-correlation structures, censored data, meta-analytic #' standard errors, and quite a few more. In addition, all parameters of the #' response distributions can be predicted in order to perform distributional #' regression. Prior specifications are flexible and explicitly encourage #' users to apply prior distributions that actually reflect their beliefs. #' In addition, model fit can easily be assessed and compared with #' posterior predictive checks and leave-one-out cross-validation. #' #' @param formula An object of class \code{\link[stats:formula]{formula}}, #' \code{\link{brmsformula}}, or \code{\link{mvbrmsformula}} (or one that can #' be coerced to that classes): A symbolic description of the model to be #' fitted. The details of model specification are explained in #' \code{\link{brmsformula}}. #' @param data An object of class \code{data.frame} (or one that can be coerced #' to that class) containing data of all variables used in the model. #' @param family A description of the response distribution and link function to #' be used in the model. This can be a family function, a call to a family #' function or a character string naming the family. Every family function has #' a \code{link} argument allowing to specify the link function to be applied #' on the response variable. If not specified, default links are used. For #' details of supported families see \code{\link{brmsfamily}}. By default, a #' linear \code{gaussian} model is applied. In multivariate models, #' \code{family} might also be a list of families. #' @param prior One or more \code{brmsprior} objects created by #' \code{\link{set_prior}} or related functions and combined using the #' \code{c} method or the \code{+} operator. See also \code{\link{get_prior}} #' for more help. #' @param data2 A named \code{list} of objects containing data, which #' cannot be passed via argument \code{data}. Required for some objects #' used in autocorrelation structures to specify dependency structures #' as well as for within-group covariance matrices. #' @param autocor (Deprecated) An optional \code{\link{cor_brms}} object #' describing the correlation structure within the response variable (i.e., #' the 'autocorrelation'). See the documentation of \code{\link{cor_brms}} for #' a description of the available correlation structures. Defaults to #' \code{NULL}, corresponding to no correlations. 
In multivariate models, #' \code{autocor} might also be a list of autocorrelation structures. #' It is now recommend to specify autocorrelation terms directly #' within \code{formula}. See \code{\link{brmsformula}} for more details. #' @param sparse (Deprecated) Logical; indicates whether the population-level #' design matrices should be treated as sparse (defaults to \code{FALSE}). For #' design matrices with many zeros, this can considerably reduce required #' memory. Sampling speed is currently not improved or even slightly #' decreased. It is now recommended to use the \code{sparse} argument of #' \code{\link{brmsformula}} and related functions. #' @param cov_ranef (Deprecated) A list of matrices that are proportional to the #' (within) covariance structure of the group-level effects. The names of the #' matrices should correspond to columns in \code{data} that are used as #' grouping factors. All levels of the grouping factor should appear as #' rownames of the corresponding matrix. This argument can be used, among #' others to model pedigrees and phylogenetic effects. #' It is now recommended to specify those matrices in the formula #' interface using the \code{\link{gr}} and related functions. See #' \code{vignette("brms_phylogenetics")} for more details. #' @param save_pars An object generated by \code{\link{save_pars}} controlling #' which parameters should be saved in the model. The argument has no #' impact on the model fitting itself. #' @param save_ranef (Deprecated) A flag to indicate if group-level effects for #' each level of the grouping factor(s) should be saved (default is #' \code{TRUE}). Set to \code{FALSE} to save memory. The argument has no #' impact on the model fitting itself. #' @param save_mevars (Deprecated) A flag to indicate if draws of latent #' noise-free variables obtained by using \code{me} and \code{mi} terms should #' be saved (default is \code{FALSE}). Saving these draws allows to better #' use methods such as \code{predict} with the latent variables but leads to #' very large \R objects even for models of moderate size and complexity. #' @param save_all_pars (Deprecated) A flag to indicate if draws from all #' variables defined in Stan's \code{parameters} block should be saved #' (default is \code{FALSE}). Saving these draws is required in order to #' apply the methods \code{bridge_sampler}, \code{bayes_factor}, and #' \code{post_prob}. #' @param sample_prior Indicate if draws from priors should be drawn #' additionally to the posterior draws. Options are \code{"no"} (the #' default), \code{"yes"}, and \code{"only"}. Among others, these draws can #' be used to calculate Bayes factors for point hypotheses via #' \code{\link{hypothesis}}. Please note that improper priors are not sampled, #' including the default improper priors used by \code{brm}. See #' \code{\link{set_prior}} on how to set (proper) priors. Please also note #' that prior draws for the overall intercept are not obtained by default #' for technical reasons. See \code{\link{brmsformula}} how to obtain prior #' draws for the intercept. If \code{sample_prior} is set to \code{"only"}, #' draws are drawn solely from the priors ignoring the likelihood, which #' allows among others to generate draws from the prior predictive #' distribution. In this case, all parameters must have proper priors. #' @param knots Optional list containing user specified knot values to be used #' for basis construction of smoothing terms. See #' \code{\link[mgcv:gamm]{gamm}} for more details. 
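# Editor's note -- an illustrative sketch using the bundled 'epilepsy' data:
# with sample_prior = "only" (described above), draws come solely from the
# priors, so all parameters need proper priors; a proper prior is therefore set
# on the regression coefficients, whose default prior is flat.
fit_prior <- brm(count ~ zAge + zBase * Trt, data = epilepsy,
                 family = poisson(),
                 prior = set_prior("normal(0, 1)", class = "b"),
                 sample_prior = "only")
pp_check(fit_prior)  # prior predictive check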
#' @param stanvars An optional \code{stanvars} object generated by function #' \code{\link{stanvar}} to define additional variables for use in #' \pkg{Stan}'s program blocks. #' @param stan_funs (Deprecated) An optional character string containing #' self-defined \pkg{Stan} functions, which will be included in the functions #' block of the generated \pkg{Stan} code. It is now recommended to use the #' \code{stanvars} argument for this purpose instead. #' @param fit An instance of S3 class \code{brmsfit} derived from a previous #' fit; defaults to \code{NA}. If \code{fit} is of class \code{brmsfit}, the #' compiled model associated with the fitted result is re-used and all #' arguments modifying the model code or data are ignored. It is not #' recommended to use this argument directly, but to call the #' \code{\link[brms:update.brmsfit]{update}} method, instead. #' @param inits Either \code{"random"} or \code{"0"}. If inits is #' \code{"random"} (the default), Stan will randomly generate initial values #' for parameters. If it is \code{"0"}, all parameters are initialized to #' zero. This option is sometimes useful for certain families, as it happens #' that default (\code{"random"}) inits cause draws to be essentially #' constant. Generally, setting \code{inits = "0"} is worth a try, if chains #' do not behave well. Alternatively, \code{inits} can be a list of lists #' containing the initial values, or a function (or function name) generating #' initial values. The latter options are mainly implemented for internal #' testing but are available to users if necessary. If specifying initial #' values using a list or a function then currently the parameter names must #' correspond to the names used in the generated Stan code (not the names #' used in \R). For more details on specifying initial values you can consult #' the documentation of the selected \code{backend}. #' @param chains Number of Markov chains (defaults to 4). #' @param iter Number of total iterations per chain (including warmup; defaults #' to 2000). #' @param warmup A positive integer specifying number of warmup (aka burnin) #' iterations. This also specifies the number of iterations used for stepsize #' adaptation, so warmup draws should not be used for inference. The number #' of warmup should not be larger than \code{iter} and the default is #' \code{iter/2}. #' @param thin Thinning rate. Must be a positive integer. Set \code{thin > 1} to #' save memory and computation time if \code{iter} is large. #' @param cores Number of cores to use when executing the chains in parallel, #' which defaults to 1 but we recommend setting the \code{mc.cores} option to #' be as many processors as the hardware and RAM allow (up to the number of #' chains). For non-Windows OS in non-interactive \R sessions, forking is used #' instead of PSOCK clusters. #' @param threads Number of threads to use in within-chain parallelization. For #' more control over the threading process, \code{threads} may also be a #' \code{brmsthreads} object created by \code{\link{threading}}. Within-chain #' parallelization is experimental! We recommend its use only if you are #' experienced with Stan's \code{reduce_sum} function and have a slow running #' model that cannot be sped up by any other means. #' @param opencl The platform and device IDs of the OpenCL device to use for #' fitting using GPU support. If you don't know the IDs of your OpenCL #' device, \code{c(0,0)} is most likely what you need. For more details, see #' \code{\link{opencl}}. 
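# Editor's note -- an illustrative sketch of the experimental within-chain
# parallelization described in the 'threads' argument above; assumes the
# cmdstanr backend is installed and configured.
fit <- brm(count ~ zAge + zBase * Trt + (1 | patient), data = epilepsy,
           family = poisson(), backend = "cmdstanr", threads = threading(2))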
#' @param normalize Logical. Indicates whether normalization constants should #' be included in the Stan code (defaults to \code{TRUE}). Setting it #' to \code{FALSE} requires Stan version >= 2.25 to work. If \code{FALSE}, #' sampling efficiency may be increased but some post processing functions #' such as \code{\link{bridge_sampler}} will not be available. Can be #' controlled globally for the current \R session via the `brms.normalize` #' option. #' @param algorithm Character string naming the estimation approach to use. #' Options are \code{"sampling"} for MCMC (the default), \code{"meanfield"} for #' variational inference with independent normal distributions, #' \code{"fullrank"} for variational inference with a multivariate normal #' distribution, or \code{"fixed_param"} for sampling from fixed parameter #' values. Can be set globally for the current \R session via the #' \code{"brms.algorithm"} option (see \code{\link{options}}). #' @param backend Character string naming the package to use as the backend for #' fitting the Stan model. Options are \code{"rstan"} (the default) or #' \code{"cmdstanr"}. Can be set globally for the current \R session via the #' \code{"brms.backend"} option (see \code{\link{options}}). Details on the #' \pkg{rstan} and \pkg{cmdstanr} packages are available at #' \url{https://mc-stan.org/rstan/} and \url{https://mc-stan.org/cmdstanr/}, #' respectively. Additionally a \code{"mock"} backend is available to make #' testing \pkg{brms} and packages that depend on it easier. #' The \code{"mock"} backend does not actually do any fitting, it only checks #' the generated Stan code for correctness and then returns whatever is passed #' in an additional \code{mock_fit} argument as the result of the fit. #' @param control A named \code{list} of parameters to control the sampler's #' behavior. It defaults to \code{NULL} so all the default values are used. #' The most important control parameters are discussed in the 'Details' #' section below. For a comprehensive overview see #' \code{\link[rstan:stan]{stan}}. #' @param future Logical; If \code{TRUE}, the \pkg{\link[future:future]{future}} #' package is used for parallel execution of the chains and argument #' \code{cores} will be ignored. Can be set globally for the current \R #' session via the \code{"future"} option. The execution type is controlled via #' \code{\link[future:plan]{plan}} (see the examples section below). #' @param silent Verbosity level between \code{0} and \code{2}. #' If \code{1} (the default), most of the #' informational messages of compiler and sampler are suppressed. #' If \code{2}, even more messages are suppressed. The actual #' sampling progress is still printed. Set \code{refresh = 0} to turn this off #' as well. If using \code{backend = "rstan"} you can also set #' \code{open_progress = FALSE} to prevent opening additional progress bars. #' @param seed The seed for random number generation to make results #' reproducible. If \code{NA} (the default), \pkg{Stan} will set the seed #' randomly. #' @param save_model Either \code{NULL} or a character string. In the latter #' case, the model's Stan code is saved via \code{\link{cat}} in a text file #' named after the string supplied in \code{save_model}. #' @param file Either \code{NULL} or a character string. In the latter case, the #' fitted model object is saved via \code{\link{saveRDS}} in a file named #' after the string supplied in \code{file}. The \code{.rds} extension is #' added automatically. 
If the file already exists, \code{brm} will load and #' return the saved model object instead of refitting the model. #' Unless you specify the \code{file_refit} argument as well, the existing #' files won't be overwritten, you have to manually remove the file in order #' to refit and save the model under an existing file name. The file name #' is stored in the \code{brmsfit} object for later usage. #' @param file_refit Modifies when the fit stored via the \code{file} parameter #' is re-used. Can be set globally for the current \R session via the #' \code{"brms.file_refit"} option (see \code{\link{options}}). #' For \code{"never"} (default) the fit is always loaded if it #' exists and fitting is skipped. For \code{"always"} the model is always #' refitted. If set to \code{"on_change"}, brms will #' refit the model if model, data or algorithm as passed to Stan differ from #' what is stored in the file. This also covers changes in priors, #' \code{sample_prior}, \code{stanvars}, covariance structure, etc. If you #' believe there was a false positive, you can use #' \code{\link{brmsfit_needs_refit}} to see why refit is deemed necessary. #' Refit will not be triggered for changes in additional parameters of the fit #' (e.g., initial values, number of iterations, control arguments, ...). A #' known limitation is that a refit will be triggered if within-chain #' parallelization is switched on/off. #' @param empty Logical. If \code{TRUE}, the Stan model is not created #' and compiled and the corresponding \code{'fit'} slot of the \code{brmsfit} #' object will be empty. This is useful if you have estimated a brms-created #' Stan model outside of \pkg{brms} and want to feed it back into the package. #' @param rename For internal use only. #' @param stan_model_args A \code{list} of further arguments passed to #' \code{\link[rstan:stan_model]{stan_model}}. #' @param ... Further arguments passed to Stan. #' For \code{backend = "rstan"} the arguments are passed to #' \code{\link[rstan]{sampling}} or \code{\link[rstan]{vb}}. #' For \code{backend = "cmdstanr"} the arguments are passed to the #' \code{cmdstanr::sample} or \code{cmdstanr::variational} method. #' #' @return An object of class \code{brmsfit}, which contains the posterior #' draws along with many other useful information about the model. Use #' \code{methods(class = "brmsfit")} for an overview on available methods. #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @details Fit a generalized (non-)linear multivariate multilevel model via #' full Bayesian inference using Stan. A general overview is provided in the #' vignettes \code{vignette("brms_overview")} and #' \code{vignette("brms_multilevel")}. For a full list of available vignettes #' see \code{vignette(package = "brms")}. #' #' \bold{Formula syntax of brms models} #' #' Details of the formula syntax applied in \pkg{brms} can be found in #' \code{\link{brmsformula}}. #' #' \bold{Families and link functions} #' #' Details of families supported by \pkg{brms} can be found in #' \code{\link{brmsfamily}}. #' #' \bold{Prior distributions} #' #' Priors should be specified using the #' \code{\link[brms:set_prior]{set_prior}} function. Its documentation #' contains detailed information on how to correctly specify priors. To find #' out on which parameters or parameter classes priors can be defined, use #' \code{\link[brms:get_prior]{get_prior}}. 
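# Editor's note -- an illustrative sketch: inspect which parameters and
# parameter classes accept priors before fitting, as suggested above.
get_prior(count ~ zAge + zBase * Trt + (1 | patient),
          data = epilepsy, family = poisson())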
Default priors are chosen to be #' non- or very weakly informative so that their influence on the results will #' be negligible and you usually don't have to worry about them. However, #' after getting more familiar with Bayesian statistics, I recommend that you #' start thinking about reasonably informative priors for your model #' parameters: Nearly always, there is at least some prior information #' available that can be used to improve your inference. #' #' \bold{Adjusting the sampling behavior of \pkg{Stan}} #' #' In addition to choosing the number of iterations, warmup draws, and #' chains, users can control the behavior of the NUTS sampler by using the #' \code{control} argument. The most important reason to use \code{control} is #' to decrease (or at best eliminate) the number of divergent transitions that #' cause a bias in the obtained posterior draws. Whenever you see the #' warning "There were x divergent transitions after warmup.", you should #' really think about increasing \code{adapt_delta}. To do this, write #' \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually #' be a value between \code{0.8} (current default) and \code{1}. Increasing #' \code{adapt_delta} will slow down the sampler but will decrease the number #' of divergent transitions threatening the validity of your posterior #' draws. #' #' Another problem arises when the maximum depth of the tree being evaluated in each #' iteration is exceeded. This is less common than having divergent #' transitions, but may also bias the posterior draws. When it happens, #' \pkg{Stan} will throw a warning suggesting that you increase #' \code{max_treedepth}, which can be accomplished by writing \code{control = #' list(max_treedepth = <x>)} with a positive integer \code{<x>} that should #' usually be larger than the current default of \code{10}. For more details #' on the \code{control} argument see \code{\link[rstan:stan]{stan}}. #' #' @references #' Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel #' Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. #' \code{doi:10.18637/jss.v080.i01} #' #' Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling #' with the R Package brms. \emph{The R Journal}, 10(1), 395–411.
#' \code{doi:10.32614/RJ-2018-017} #' #' @seealso \code{\link{brms}}, \code{\link{brmsformula}}, #' \code{\link{brmsfamily}}, \code{\link{brmsfit}} #' #' @examples #' \dontrun{ #' # Poisson regression for the number of seizures in epileptic patients #' # using normal priors for population-level effects #' # and half-cauchy priors for standard deviations of group-level effects #' prior1 <- prior(normal(0,10), class = b) + #' prior(cauchy(0,2), class = sd) #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), prior = prior1) #' #' # generate a summary of the results #' summary(fit1) #' #' # plot the MCMC chains as well as the posterior distributions #' plot(fit1, ask = FALSE) #' #' # predict responses based on the fitted model #' head(predict(fit1)) #' #' # plot conditional effects for each predictor #' plot(conditional_effects(fit1), ask = FALSE) #' #' # investigate model fit #' loo(fit1) #' pp_check(fit1) #' #' #' # Ordinal regression modeling patient's rating of inhaler instructions #' # category specific effects are estimated for variable 'treat' #' fit2 <- brm(rating ~ period + carry + cs(treat), #' data = inhaler, family = sratio("logit"), #' prior = set_prior("normal(0,5)"), chains = 2) #' summary(fit2) #' plot(fit2, ask = FALSE) #' WAIC(fit2) #' #' #' # Survival regression modeling the time between the first #' # and second recurrence of an infection in kidney patients. #' fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = lognormal()) #' summary(fit3) #' plot(fit3, ask = FALSE) #' plot(conditional_effects(fit3), ask = FALSE) #' #' #' # Probit regression using the binomial family #' ntrials <- sample(1:10, 100, TRUE) #' success <- rbinom(100, size = ntrials, prob = 0.4) #' x <- rnorm(100) #' data4 <- data.frame(ntrials, success, x) #' fit4 <- brm(success | trials(ntrials) ~ x, data = data4, #' family = binomial("probit")) #' summary(fit4) #' #' #' # Non-linear Gaussian model #' fit5 <- brm( #' bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), #' ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, #' nl = TRUE), #' data = loss, family = gaussian(), #' prior = c( #' prior(normal(5000, 1000), nlpar = "ult"), #' prior(normal(1, 2), nlpar = "omega"), #' prior(normal(45, 10), nlpar = "theta") #' ), #' control = list(adapt_delta = 0.9) #' ) #' summary(fit5) #' conditional_effects(fit5) #' #' #' # Normal model with heterogeneous variances #' data_het <- data.frame( #' y = c(rnorm(50), rnorm(50, 1, 2)), #' x = factor(rep(c("a", "b"), each = 50)) #' ) #' fit6 <- brm(bf(y ~ x, sigma ~ 0 + x), data = data_het) #' summary(fit6) #' plot(fit6) #' conditional_effects(fit6) #' #' # extract estimated residual SDs of both groups #' sigmas <- exp(as.data.frame(fit6, variable = "^b_sigma_", regex = TRUE)) #' ggplot(stack(sigmas), aes(values)) + #' geom_density(aes(fill = ind)) #' #' #' # Quantile regression predicting the 25%-quantile #' fit7 <- brm(bf(y ~ x, quantile = 0.25), data = data_het, #' family = asym_laplace()) #' summary(fit7) #' conditional_effects(fit7) #' #' #' # use the future package for more flexible parallelization #' library(future) #' plan(multiprocess) #' fit7 <- update(fit7, future = TRUE) #' #' #' # fit a model manually via rstan #' scode <- make_stancode(count ~ Trt, data = epilepsy) #' sdata <- make_standata(count ~ Trt, data = epilepsy) #' stanfit <- rstan::stan(model_code = scode, data = sdata) #' # feed the Stan model back into brms #' fit8 <- brm(count ~ Trt, data = epilepsy, empty = TRUE) #' fit8$fit 
<- stanfit #' fit8 <- rename_pars(fit8) #' summary(fit8) #' } #' #' @import parallel #' @import methods #' @import stats #' @import Rcpp #' @export brm <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", sparse = NULL, knots = NULL, stanvars = NULL, stan_funs = NULL, fit = NA, save_pars = NULL, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL, inits = "random", chains = 4, iter = 2000, warmup = floor(iter / 2), thin = 1, cores = getOption("mc.cores", 1), threads = NULL, opencl = NULL, normalize = getOption("brms.normalize", TRUE), control = NULL, algorithm = getOption("brms.algorithm", "sampling"), backend = getOption("brms.backend", "rstan"), future = getOption("future", FALSE), silent = 1, seed = NA, save_model = NULL, stan_model_args = list(), file = NULL, file_refit = getOption("brms.file_refit", "never"), empty = FALSE, rename = TRUE, ...) { # optionally load brmsfit from file # Loading here only when we should directly load the file. # The "on_change" option needs sdata and scode to be built file_refit <- match.arg(file_refit, file_refit_options()) if (!is.null(file) && file_refit == "never") { x <- read_brmsfit(file) if (!is.null(x)) { return(x) } } # validate arguments later passed to Stan algorithm <- match.arg(algorithm, algorithm_choices()) backend <- match.arg(backend, backend_choices()) normalize <- as_one_logical(normalize) silent <- validate_silent(silent) iter <- as_one_numeric(iter) warmup <- as_one_numeric(warmup) thin <- as_one_numeric(thin) chains <- as_one_numeric(chains) cores <- as_one_numeric(cores) threads <- validate_threads(threads) opencl <- validate_opencl(opencl) future <- as_one_logical(future) && chains > 0L seed <- as_one_numeric(seed, allow_na = TRUE) empty <- as_one_logical(empty) rename <- as_one_logical(rename) # initialize brmsfit object if (is.brmsfit(fit)) { # re-use existing model x <- fit x$criteria <- list() sdata <- standata(x) if (!is.null(file) && file_refit == "on_change") { x_from_file <- read_brmsfit(file) if (!is.null(x_from_file)) { needs_refit <- brmsfit_needs_refit( x_from_file, scode = stancode(x), sdata = sdata, data = x$data, algorithm = algorithm, silent = silent ) if (!needs_refit) { return(x_from_file) } } } backend <- x$backend model <- compiled_model(x) exclude <- exclude_pars(x) } else { # build new model formula <- validate_formula( formula, data = data, family = family, autocor = autocor, sparse = sparse, cov_ranef = cov_ranef ) family <- get_element(formula, "family") bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data_name <- substitute_name(data) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots ) attr(data, "data_name") <- data_name prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior ) stanvars <- validate_stanvars(stanvars, stan_funs = stan_funs) save_pars <- validate_save_pars( save_pars, save_ranef = save_ranef, save_mevars = save_mevars, save_all_pars = save_all_pars ) ranef <- tidy_ranef(bterms, data = data) # generate Stan code model <- .make_stancode( bterms, data = data, prior = prior, stanvars = stanvars, save_model = save_model, backend = backend, threads = threads, opencl = opencl, normalize = normalize ) # initialize S3 object x <- brmsfit( formula = formula, data = data, data2 = data2, prior = prior, stanvars = stanvars, model = model, algorithm = algorithm, backend = 
backend, threads = threads, opencl = opencl, save_pars = save_pars, ranef = ranef, family = family ) exclude <- exclude_pars(x) # generate Stan data before compiling the model to avoid # unnecessary compilations in case of invalid data sdata <- .make_standata( bterms, data = data, prior = prior, data2 = data2, stanvars = stanvars, threads = threads ) if (empty) { # return the brmsfit object with an empty 'fit' slot return(x) } if (!is.null(file) && file_refit == "on_change") { x_from_file <- read_brmsfit(file) if (!is.null(x_from_file)) { needs_refit <- brmsfit_needs_refit( x_from_file, scode = model, sdata = sdata, data = data, algorithm = algorithm, silent = silent ) if (!needs_refit) { return(x_from_file) } } } # compile the Stan model compile_args <- stan_model_args compile_args$model <- model compile_args$backend <- backend compile_args$threads <- threads compile_args$opencl <- opencl compile_args$silent <- silent model <- do_call(compile_model, compile_args) } # fit the Stan model fit_args <- nlist( model, sdata, algorithm, backend, iter, warmup, thin, chains, cores, threads, opencl, inits, exclude, control, future, seed, silent, ... ) x$fit <- do_call(fit_model, fit_args) # rename parameters to have human readable names if (rename) { x <- rename_pars(x) } if (!is.null(file)) { x <- write_brmsfit(x, file) } x } brms/R/data-response.R0000644000175000017500000005115614111751666014460 0ustar nileshnilesh#' Extract response values #' #' Extract response values from a \code{\link{brmsfit}} object. #' #' @param x A \code{\link{brmsfit}} object. #' @param resp Optional names of response variables for which to extract values. #' @param warn For internal use only. #' @param ... Further arguments passed to \code{\link{standata}}. #' @inheritParams posterior_predict.brmsfit #' #' @return Returns a vector of response values for univariate models and a #' matrix of response values with one column per response variable for #' multivariate models. #' #' @keywords internal #' @export get_y <- function(x, resp = NULL, sort = FALSE, warn = FALSE, ...) { stopifnot(is.brmsfit(x)) resp <- validate_resp(resp, x) sort <- as_one_logical(sort) warn <- as_one_logical(warn) args <- list(x, resp = resp, ...) args$re_formula <- NA args$check_response <- TRUE args$only_response <- TRUE args$internal <- TRUE sdata <- do_call(standata, args) if (warn) { if (any(paste0("cens", usc(resp)) %in% names(sdata))) { warning2("Results may not be meaningful for censored models.") } } Ynames <- paste0("Y", usc(resp)) if (length(Ynames) > 1L) { out <- do_call(cbind, sdata[Ynames]) colnames(out) <- resp } else { out <- sdata[[Ynames]] } old_order <- attr(sdata, "old_order") if (!is.null(old_order) && !sort) { stopifnot(length(old_order) == NROW(out)) out <- p(out, old_order) } out } #' Prepare Response Data #' #' Prepare data related to response variables in \pkg{brms}. #' Only exported for use in package development. #' #' @param x An \R object. #' @param ... Further arguments passed to or from other methods. #' #' @return A named list of data related to response variables. #' #' @keywords internal #' @export data_response <- function(x, ...) { UseMethod("data_response") } #' @export data_response.mvbrmsterms <- function(x, basis = NULL, ...) { out <- list() for (i in seq_along(x$terms)) { bs <- basis$resps[[x$responses[i]]] c(out) <- data_response(x$terms[[i]], basis = bs, ...) 
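    # A minimal usage sketch for get_y() documented above, kept as a comment so
    # it does not interfere with the surrounding code. It assumes a fitted
    # model object named 'fit' (hypothetical) and that nobs() is available for
    # brmsfit objects:
    # fit <- brm(count ~ zAge + zBase * Trt + (1 | patient),
    #            data = epilepsy, family = poisson())
    # y <- get_y(fit)  # numeric vector with one element per observation
    # length(y) == nobs(fit)
    # For multivariate models, get_y() instead returns a matrix with one
    # column per response variable.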
} if (x$rescor) { out$nresp <- length(x$responses) out$nrescor <- out$nresp * (out$nresp - 1) / 2 } out } #' @export data_response.brmsterms <- function(x, data, check_response = TRUE, internal = FALSE, basis = NULL, ...) { data <- subset_data(data, x) N <- nrow(data) # TODO: rename 'Y' to 'y' Y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out <- list(N = N, Y = unname(Y)) if (is_binary(x$family) || is_categorical(x$family)) { out$Y <- as_factor(out$Y, levels = basis$resp_levels) out$Y <- as.numeric(out$Y) if (is_binary(x$family)) { out$Y <- out$Y - 1 } } if (is_ordinal(x$family) && is.ordered(out$Y)) { out$Y <- as.numeric(out$Y) } if (check_response) { family4error <- family_names(x$family) if (is.mixfamily(x$family)) { family4error <- paste0(family4error, collapse = ", ") family4error <- paste0("mixture(", family4error, ")") } if (!allow_factors(x$family) && !is.numeric(out$Y)) { stop2("Family '", family4error, "' requires numeric responses.") } if (is_binary(x$family)) { if (any(!out$Y %in% c(0, 1))) { stop2("Family '", family4error, "' requires responses ", "to contain only two different values.") } } if (is_ordinal(x$family)) { if (any(!is_wholenumber(out$Y)) || any(!out$Y > 0)) { stop2("Family '", family4error, "' requires either positive ", "integers or ordered factors as responses.") } } if (use_int(x$family)) { if (!all(is_wholenumber(out$Y))) { stop2("Family '", family4error, "' requires integer responses.") } } if (has_multicol(x$family)) { if (!is.matrix(out$Y)) { stop2("This model requires a response matrix.") } } if (is_dirichlet(x$family)) { if (!is_equal(rowSums(out$Y), rep(1, nrow(out$Y)))) { stop2("Response values in dirichlet models must sum to 1.") } } ybounds <- family_info(x$family, "ybounds") closed <- family_info(x$family, "closed") if (is.finite(ybounds[1])) { y_min <- min(out$Y, na.rm = TRUE) if (closed[1] && y_min < ybounds[1]) { stop2("Family '", family4error, "' requires response greater ", "than or equal to ", ybounds[1], ".") } else if (!closed[1] && y_min <= ybounds[1]) { stop2("Family '", family4error, "' requires response greater ", "than ", round(ybounds[1], 2), ".") } } if (is.finite(ybounds[2])) { y_max <- max(out$Y, na.rm = TRUE) if (closed[2] && y_max > ybounds[2]) { stop2("Family '", family4error, "' requires response smaller ", "than or equal to ", ybounds[2], ".") } else if (!closed[2] && y_max >= ybounds[2]) { stop2("Family '", family4error, "' requires response smaller ", "than ", round(ybounds[2], 2), ".") } } out$Y <- as.array(out$Y) } # data for addition arguments of the response if (has_trials(x$family) || is.formula(x$adforms$trials)) { if (!length(x$adforms$trials)) { if (is_multinomial(x$family)) { stop2("Specifying 'trials' is required in multinomial models.") } trials <- round(max(out$Y, na.rm = TRUE)) if (isTRUE(is.finite(trials))) { message("Using the maximum response value as the number of trials.") warning2( "Using 'binomial' families without specifying 'trials' ", "on the left-hand side of the model formula is deprecated." 
) } else if (!is.null(basis$trials)) { trials <- max(basis$trials) } else { stop2("Could not compute the number of trials.") } } else if (is.formula(x$adforms$trials)) { trials <- get_ad_values(x, "trials", "trials", data) if (!is.numeric(trials)) { stop2("Number of trials must be numeric.") } if (any(!is_wholenumber(trials) | trials < 0)) { stop2("Number of trials must be non-negative integers.") } } else { stop2("Argument 'trials' is misspecified.") } if (length(trials) == 1L) { trials <- rep(trials, nrow(data)) } if (check_response) { if (is_multinomial(x$family)) { if (!is_equal(rowSums(out$Y), trials)) { stop2("Number of trials does not match the number of events.") } } else if (has_trials(x$family)) { if (max(trials) == 1L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } if (any(out$Y > trials)) { stop2("Number of trials is smaller than the number of events.") } } } out$trials <- as.array(trials) } if (has_cat(x$family)) { ncat <- length(get_cats(x$family)) if (min(ncat) < 2L) { stop2("At least two response categories are required.") } if (!has_multicol(x$family)) { if (ncat == 2L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } if (check_response && any(out$Y > ncat)) { stop2("Number of categories is smaller than the response ", "variable would suggest.") } } out$ncat <- ncat } if (has_thres(x$family)) { thres <- family_info(x, "thres") if (has_thres_groups(x$family)) { groups <- get_thres_groups(x) out$ngrthres <- length(groups) grthres <- get_ad_values(x, "thres", "gr", data) grthres <- factor(rename(grthres), levels = groups) # create an matrix of threshold indices per observation Jgrthres <- match(grthres, groups) nthres <- as.array(rep(NA, length(groups))) for (i in seq_along(groups)) { nthres[i] <- max(subset2(thres, group = groups[i])$thres) } if (check_response && any(out$Y > nthres[Jgrthres] + 1)) { stop2("Number of thresholds is smaller than required by the response.") } Kthres_cumsum <- cumsum(nthres) Kthres_start <- c(1, Kthres_cumsum[-length(nthres)] + 1) Kthres_end <- Kthres_cumsum Jthres <- cbind(Kthres_start, Kthres_end)[Jgrthres, , drop = FALSE] out$Jthres <- Jthres } else { nthres <- max(thres$thres) if (check_response && any(out$Y > nthres + 1)) { stop2("Number of thresholds is smaller than required by the response.") } } if (max(nthres) == 1L && !internal) { message("Only 2 levels detected so that family 'bernoulli' ", "might be a more efficient choice.") } out$nthres <- nthres } if (is.formula(x$adforms$cat)) { warning2("Addition argument 'cat' is deprecated. Use 'thres' instead. 
", "See ?brmsformula for more details.") } if (is.formula(x$adforms$se)) { se <- get_ad_values(x, "se", "se", data) if (!is.numeric(se)) { stop2("Standard errors must be numeric.") } if (min(se) < 0) { stop2("Standard errors must be non-negative.") } out$se <- as.array(se) } if (is.formula(x$adforms$weights)) { weights <- get_ad_values(x, "weights", "weights", data) if (!is.numeric(weights)) { stop2("Weights must be numeric.") } if (min(weights) < 0) { stop2("Weights must be non-negative.") } if (get_ad_flag(x, "weights", "scale")) { weights <- weights / sum(weights) * length(weights) } out$weights <- as.array(weights) } if (is.formula(x$adforms$dec)) { dec <- get_ad_values(x, "dec", "dec", data) if (is.character(dec) || is.factor(dec)) { if (!all(unique(dec) %in% c("lower", "upper"))) { stop2("Decisions should be 'lower' or 'upper' ", "when supplied as characters or factors.") } dec <- ifelse(dec == "lower", 0, 1) } else { dec <- as.numeric(as.logical(dec)) } out$dec <- as.array(dec) } if (is.formula(x$adforms$rate)) { denom <- get_ad_values(x, "rate", "denom", data) if (!is.numeric(denom)) { stop2("Rate denomiators should be numeric.") } if (isTRUE(any(denom <= 0))) { stop2("Rate denomiators should be positive.") } out$denom <- as.array(denom) } if (is.formula(x$adforms$cens) && check_response) { cens <- get_ad_values(x, "cens", "cens", data) cens <- prepare_cens(cens) if (!all(is_wholenumber(cens) & cens %in% -1:2)) { stop2( "Invalid censoring data. Accepted values are ", "'left', 'none', 'right', and 'interval'\n", "(abbreviations are allowed) or -1, 0, 1, and 2.\n", "TRUE and FALSE are also accepted ", "and refer to 'right' and 'none' respectively." ) } out$cens <- as.array(cens) icens <- cens %in% 2 if (any(icens)) { y2 <- unname(get_ad_values(x, "cens", "y2", data)) if (is.null(y2)) { stop2("Argument 'y2' is required for interval censored data.") } if (anyNA(y2[icens])) { stop2("'y2' should not be NA for interval censored observations.") } if (any(out$Y[icens] >= y2[icens])) { stop2("Left censor points must be smaller than right ", "censor points for interval censored data.") } y2[!icens] <- 0 # not used in Stan out$rcens <- as.array(y2) } } if (is.formula(x$adforms$trunc)) { lb <- as.numeric(get_ad_values(x, "trunc", "lb", data)) ub <- as.numeric(get_ad_values(x, "trunc", "ub", data)) if (any(lb >= ub)) { stop2("Truncation bounds are invalid: lb >= ub") } if (length(lb) == 1L) { lb <- rep(lb, N) } if (length(ub) == 1L) { ub <- rep(ub, N) } if (length(lb) != N || length(ub) != N) { stop2("Invalid truncation bounds.") } inv_bounds <- out$Y < lb | out$Y > ub if (check_response && isTRUE(any(inv_bounds))) { stop2("Some responses are outside of the truncation bounds.") } out$lb <- lb out$ub <- ub } if (is.formula(x$adforms$mi)) { sdy <- get_sdy(x, data) if (is.null(sdy)) { # missings only which_mi <- which(is.na(out$Y)) out$Jmi <- as.array(which_mi) out$Nmi <- length(out$Jmi) } else { # measurement error in the response if (length(sdy) == 1L) { sdy <- rep(sdy, length(out$Y)) } if (length(sdy) != length(out$Y)) { stop2("'sdy' must have the same length as the response.") } # all observations will have a latent score which_mi <- which(is.na(out$Y) | is.infinite(sdy)) out$Jme <- as.array(setdiff(seq_along(out$Y), which_mi)) out$Nme <- length(out$Jme) out$noise <- as.array(sdy) if (!internal) { out$noise[which_mi] <- Inf } } # bounds are required for predicting new missing values # not required in Stan right now as bounds are hard-coded there tbounds <- trunc_bounds(x, data, incl_family = 
TRUE)
    out$lbmi <- tbounds$lb
    out$ubmi <- tbounds$ub
    if (!internal) {
      # Stan does not allow NAs in data
      # use Inf so that min(Y) is not affected
      out$Y[which_mi] <- Inf
    }
  }
  if (is.formula(x$adforms$vreal)) {
    # vectors of real values for use in custom families
    vreal <- eval_rhs(x$adforms$vreal)
    vreal <- lapply(vreal$vars, eval2, data)
    names(vreal) <- paste0("vreal", seq_along(vreal))
    for (i in seq_along(vreal)) {
      if (length(vreal[[i]]) == 1L) {
        vreal[[i]] <- rep(vreal[[i]], N)
      }
      vreal[[i]] <- as.array(as.numeric(vreal[[i]]))
    }
    c(out) <- vreal
  }
  if (is.formula(x$adforms$vint)) {
    # vectors of integer values for use in custom families
    vint <- eval_rhs(x$adforms$vint)
    vint <- lapply(vint$vars, eval2, data)
    names(vint) <- paste0("vint", seq_along(vint))
    for (i in seq_along(vint)) {
      if (length(vint[[i]]) == 1L) {
        vint[[i]] <- rep(vint[[i]], N)
      }
      if (!all(is_wholenumber(vint[[i]]))) {
        stop2("'vint' requires whole numbers as input.")
      }
      vint[[i]] <- as.array(vint[[i]])
    }
    c(out) <- vint
  }
  if (length(out)) {
    resp <- usc(combine_prefix(x))
    out <- setNames(out, paste0(names(out), resp))
  }
  out
}

# data specific for mixture models
data_mixture <- function(bterms, data2, prior) {
  stopifnot(is.brmsterms(bterms))
  out <- list()
  if (is.mixfamily(bterms$family)) {
    families <- family_names(bterms$family)
    dp_classes <- dpar_class(names(c(bterms$dpars, bterms$fdpars)))
    if (!any(dp_classes %in% "theta")) {
      # estimate mixture probabilities directly
      take <- find_rows(prior, class = "theta", resp = bterms$resp)
      theta_prior <- prior$prior[take]
      con_theta <- eval_dirichlet(theta_prior, length(families), data2)
      out$con_theta <- as.array(con_theta)
      p <- usc(combine_prefix(bterms))
      names(out) <- paste0(names(out), p)
    }
  }
  out
}

# data for the baseline hazard functions of Cox models
data_bhaz <- function(bterms, data, data2, prior, basis = NULL) {
  out <- list()
  if (!is_cox(bterms$family)) {
    return(out)
  }
  y <- model.response(model.frame(bterms$respform, data, na.action = na.pass))
  args <- bterms$family$bhaz
  bs <- basis$basis_matrix
  out$Zbhaz <- bhaz_basis_matrix(y, args, basis = bs)
  out$Zcbhaz <- bhaz_basis_matrix(y, args, integrate = TRUE, basis = bs)
  out$Kbhaz <- NCOL(out$Zbhaz)
  sbhaz_prior <- subset2(prior, class = "sbhaz", resp = bterms$resp)
  con_sbhaz <- eval_dirichlet(sbhaz_prior$prior, out$Kbhaz, data2)
  out$con_sbhaz <- as.array(con_sbhaz)
  out
}

# Basis matrices for baseline hazard functions of the Cox model
# @param y vector of response values
# @param args arguments passed to the spline generating functions
# @param integrate compute the I-spline instead of the M-spline basis?
# @param basis optional precomputed basis matrix # @return the design matrix of the baseline hazard function bhaz_basis_matrix <- function(y, args = list(), integrate = FALSE, basis = NULL) { require_package("splines2") if (!is.null(basis)) { # perform predictions based on an existing basis matrix stopifnot(inherits(basis, "mSpline")) if (integrate) { # for predictions just the attibutes are required # which are the same of M-Splines and I-Splines class(basis) <- c("matrix", "iSpline") } return(predict(basis, y)) } stopifnot(is.list(args)) args$x <- y if (!is.null(args$intercept)) { args$intercept <- as_one_logical(args$intercept) } if (is.null(args$Boundary.knots)) { # avoid 'knots' outside 'Boundary.knots' error (#1143) # we also need a smaller lower boundary knot to avoid lp = -Inf # the below choices are ad-hoc and may need further thought min_y <- min(y, na.rm = TRUE) max_y <- max(y, na.rm = TRUE) diff_y <- max_y - min_y lower_knot <- max(min_y - diff_y / 50, 0) upper_knot <- max_y + diff_y / 50 args$Boundary.knots <- c(lower_knot, upper_knot) } if (integrate) { out <- do_call(splines2::iSpline, args) } else { out <- do_call(splines2::mSpline, args) } out } # extract names of response categories # @param x a brmsterms object or one that can be coerced to it # @param data user specified data # @return a vector of category names extract_cat_names <- function(x, data) { stopifnot(is.brmsformula(x) || is.brmsterms(x)) respform <- validate_resp_formula(x$formula) mr <- model.response(model.frame(respform, data)) if (has_multicol(x)) { mr <- as.matrix(mr) out <- as.character(colnames(mr)) if (!length(out)) { out <- as.character(seq_cols(mr)) } } else { out <- levels(factor(mr)) } out } # extract names of ordinal thresholds # @param x a brmsterms object or one that can be coerced to it # @param data user specified data # @return a data.frame with columns 'thres' and 'group' extract_thres_names <- function(x, data) { stopifnot(is.brmsformula(x) || is.brmsterms(x), has_thres(x)) if (is.null(x$adforms)) { x$adforms <- terms_ad(x$formula, x$family) } nthres <- get_ad_values(x, "thres", "thres", data) if (any(!is_wholenumber(nthres) | nthres < 1L)) { stop2("Number of thresholds must be a positive integer.") } grthres <- get_ad_values(x, "thres", "gr", data) if (!is.null(grthres)) { # grouping variable was specified if (!is_like_factor(grthres)) { stop2("Variable 'gr' in 'thres' needs to be factor-like.") } grthres <- factor(grthres) group <- levels(grthres) if (!length(nthres)) { # extract number of thresholds from the response values nthres <- rep(NA, length(group)) for (i in seq_along(group)) { take <- grthres %in% group[i] nthres[i] <- extract_nthres(x$formula, data[take, , drop = FALSE]) } } else if (length(nthres) == 1L) { # replicate number of thresholds across groups nthres <- rep(nthres, length(group)) } else { # number of thresholds is a variable in the data for (i in seq_along(group)) { # validate values of the same level take <- grthres %in% group[i] if (length(unique(nthres[take])) > 1L) { stop2("Number of thresholds should be unique for each group.") } } nthres <- get_one_value_per_group(nthres, grthres) } group <- rep(rename(group), nthres) thres <- ulapply(unname(nthres), seq_len) } else { # no grouping variable was specified group <- "" if (!length(nthres)) { # extract number of thresholds from the response values nthres <- extract_nthres(x$formula, data) } if (length(nthres) > 1L) { stop2("Number of thresholds needs to be a single value.") } thres <- seq_len(nthres) } 
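  # Illustrative sketch (hypothetical values) of the data.frame returned just
  # below: for two groups "a" and "b" with 3 thresholds each, 'group' is
  # repeated once per threshold and 'thres' counts within each group:
  #   thres group
  #       1     a
  #       2     a
  #       3     a
  #       1     b
  #       2     b
  #       3     b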
data.frame(thres, group, stringsAsFactors = FALSE) } # extract threshold names from the response values # @param formula with the response on the LHS # @param data a data.frame from which to extract responses # @return a single value for the number of thresholds extract_nthres <- function(formula, data) { respform <- validate_resp_formula(formula) mr <- model.response(model.frame(respform, data)) if (is_like_factor(mr)) { out <- length(levels(factor(mr))) - 1 } else { out <- max(mr) - 1 } out } brms/R/zzz.R0000644000175000017500000000175613657252321012547 0ustar nileshnilesh# Uncomment the code below to enable unit tests for new stan functions # new_stan_functions <- function() { # # copy all new stan functions into a single .stan file and compile it # isystem <- system.file("chunks", package = "brms") # chunk_filenames <- list.files(isystem, pattern = "^fun_") # families <- list(cumulative("probit"), sratio("logit"), # cratio("cloglog"), acat("cauchit")) # cs <- c(rep(FALSE, 2), rep(TRUE, 2)) # ordinal_funs <- ulapply(seq_along(families), function(i) # stan_ordinal(families[[i]], cs = cs[i])$fun) # temp_file <- tempfile() # cat(paste0("functions { \n", # collapse(" #include '", chunk_filenames, "' \n"), # collapse(ordinal_funs), "} \nmodel {} \n"), # file = temp_file) # model <- rstan::stanc_builder(file = temp_file, isystem = isystem, # obfuscate_model_name = TRUE) # rstan::stan_model(stanc_ret = model) # } # new_stan_functions <- new_stan_functions() brms/R/stan-predictor.R0000644000175000017500000023547514115707362014657 0ustar nileshnilesh# unless otherwise specified, functions return a named list # of Stan code snippets to be pasted together later on # generate stan code for predictor terms stan_predictor <- function(x, ...) { UseMethod("stan_predictor") } # combine effects for the predictors of a single (non-linear) parameter # @param ... arguments passed to the underlying effect-specific functions #' @export stan_predictor.btl <- function(x, ...) { out <- collapse_lists( stan_fe(x, ...), stan_thres(x, ...), stan_sp(x, ...), stan_cs(x, ...), stan_sm(x, ...), stan_gp(x, ...), stan_ac(x, ...), stan_offset(x, ...), stan_bhaz(x, ...), stan_special_prior_global(x, ...) ) stan_eta_combine(out, bterms = x, ...) } # prepare Stan code for non-linear terms #' @export stan_predictor.btnl <- function(x, ...) { collapse_lists( stan_nl(x, ...), stan_thres(x, ...), stan_bhaz(x, ...), stan_ac(x, ...) ) } #' @export stan_predictor.brmsterms <- function(x, data, prior, normalize, ...) { px <- check_prefix(x) resp <- usc(combine_prefix(px)) data <- subset_data(data, x) out <- list() str_add_list(out) <- stan_response(x, data = data, normalize = normalize) valid_dpars <- valid_dpars(x) args <- nlist(data, prior, normalize, nlpars = names(x$nlpars), ...) 
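  # The stan_* helpers in this file each return a named list of Stan code
  # snippets (elements such as 'data', 'par', 'tpar_def', 'prior', 'eta')
  # that are pasted together later on. A quick way to inspect the assembled
  # result from the user side is make_stancode(); a minimal sketch using the
  # bundled epilepsy data:
  # scode <- make_stancode(count ~ zAge + (1 | patient),
  #                        data = epilepsy, family = poisson())
  # cat(scode)  # prints the complete Stan program built from these snippets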
args$primitive <- use_glm_primitive(x) for (nlp in names(x$nlpars)) { nlp_args <- list(x$nlpars[[nlp]]) str_add_list(out) <- do_call(stan_predictor, c(nlp_args, args)) } for (dp in valid_dpars) { dp_terms <- x$dpars[[dp]] if (is.btl(dp_terms) || is.btnl(dp_terms)) { # distributional parameter is predicted ilink <- stan_eta_ilink(dp, bterms = x, resp = resp) dp_args <- list(dp_terms, ilink = ilink) str_add_list(out) <- do_call(stan_predictor, c(dp_args, args)) } else if (is.numeric(x$fdpars[[dp]]$value)) { # distributional parameter is fixed to a numeric value dp_type <- stan_dpar_types(dp, resp, family = x$family, fixed = TRUE) if (nzchar(dp_type)) { dp_value <- x$fdpars[[dp]]$value dp_comment <- stan_comment(attr(dp_type, "comment")) str_add(out$tpar_def) <- glue( " {dp_type} {dp}{resp} = {dp_value};{dp_comment}\n" ) str_add(out$pll_args) <- glue(", real {dp}{resp}") } } else if (is.character(x$fdpars[[dp]]$value)) { # distributional parameter is fixed to another distributional parameter if (!x$fdpars[[dp]]$value %in% valid_dpars) { stop2("Parameter '", x$fdpars[[dp]]$value, "' cannot be found.") } dp_type <- stan_dpar_types(dp, resp, family = x$family) if (nzchar(dp_type)) { dp_value <- x$fdpars[[dp]]$value dp_comment <- stan_comment(attr(dp_type, "comment")) str_add(out$tpar_def) <- glue( " {dp_type} {dp}{resp};{dp_comment}\n" ) str_add(out$tpar_comp) <- glue( " {dp}{resp} = {dp_value}{resp};\n" ) str_add(out$pll_args) <- glue(", real {dp}{resp}") } } else { # distributional parameter is estimated as a scalar dp_type <- stan_dpar_types(dp, resp, family = x$family) dp_tmp_type <- stan_dpar_tmp_types(dp, resp, family = x$family) if (nzchar(dp_tmp_type)) { # distributional parameter has a temporary definition dp_comment <- attr(dp_tmp_type, "comment") str_add_list(out) <- stan_prior( prior, dp, type = dp_tmp_type, prefix = "tmp_", suffix = resp, header_type = "real", px = px, comment = dp_comment, normalize = normalize ) } else if (nzchar(dp_type)) { # distributional parameter has a regular definition dp_comment <- attr(dp_type, "comment") str_add_list(out) <- stan_prior( prior, dp, type = dp_type, suffix = resp, header_type = "real", px = px, comment = dp_comment, normalize = normalize ) } } } str_add_list(out) <- stan_mixture( x, data = data, prior = prior, normalize = normalize, ... ) str_add_list(out) <- stan_dpar_transform(x, ...) out$model_log_lik <- stan_log_lik(x, data = data, normalize = normalize, ...) list(out) } #' @export stan_predictor.mvbrmsterms <- function(x, prior, threads, normalize, ...) { out <- lapply(x$terms, stan_predictor, prior = prior, threads = threads, normalize = normalize, ...) 
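  # Sketch of how the three cases handled in stan_predictor.brmsterms above
  # arise from the user-facing formula syntax (shown as comments only; the
  # constant-value form is an assumption about the brmsformula interface):
  # bf(y ~ x)             # sigma estimated as a single scalar parameter
  # bf(y ~ x, sigma ~ x)  # sigma predicted by its own linear formula
  # bf(y ~ x, sigma = 1)  # sigma fixed to a constant value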
out <- unlist(out, recursive = FALSE) if (!x$rescor) { return(out) } resp_type <- out[[1]]$resp_type out <- collapse_lists(ls = out) out$resp_type <- "vector" adforms <- lapply(x$terms, "[[", "adforms") adnames <- unique(ulapply(adforms, names)) adallowed <- c("se", "weights", "mi") if (!all(adnames %in% adallowed)) { stop2("Only ", collapse_comma(adallowed), " are supported ", "addition arguments when 'rescor' is estimated.") } # we already know at this point that all families are identical family <- family_names(x)[1] stopifnot(family %in% c("gaussian", "student")) resp <- x$responses nresp <- length(resp) str_add(out$model_def) <- glue( " // multivariate predictor array\n", " vector[nresp] Mu[N];\n" ) str_add(out$model_comp_mvjoin) <- glue( " Mu[n] = {stan_vector(glue('mu_{resp}[n]'))};\n" ) str_add(out$data) <- glue( " int nresp; // number of responses\n", " int nrescor; // number of residual correlations\n" ) str_add(out$pll_args) <- glue(", data int nresp") str_add(out$tdata_def) <- glue( " vector[nresp] Y[N]; // response array\n" ) str_add(out$tdata_comp) <- glue( " for (n in 1:N) {{\n", " Y[n] = {stan_vector(glue('Y_{resp}[n]'))};\n", " }}\n" ) str_add(out$pll_args) <- ", data vector[] Y" if (any(adnames %in% "weights")) { str_add(out$tdata_def) <- glue( " // weights of the pointwise log-likelihood\n", " vector[N] weights = weights_{resp[1]};\n" ) str_add(out$pll_args) <- glue(", data vector weights") } miforms <- rmNULL(lapply(adforms, "[[", "mi")) if (length(miforms)) { str_add(out$model_no_pll_def) <- " vector[nresp] Yl[N] = Y;\n" str_add(out$pll_args) <- ", vector[] Yl" for (i in seq_along(miforms)) { j <- match(names(miforms)[i], resp) # needs to happen outside of reduce_sum # to maintain consistency of indexing Yl str_add(out$model_no_pll_comp_mvjoin) <- glue( " Yl[n][{j}] = Yl_{resp[j]}[n];\n" ) } } str_add_list(out) <- stan_prior( prior, class = "Lrescor", type = "cholesky_factor_corr[nresp]", header_type = "matrix", comment = "parameters for multivariate linear models", normalize = normalize ) if (family == "student") { str_add_list(out) <- stan_prior( prior, class = "nu", type = stan_dpar_types("nu"), header_type = "real", normalize = normalize ) } sigma <- ulapply(x$terms, stan_sigma_transform, threads = threads) if (any(grepl(stan_nn_regex(), sigma))) { str_add(out$model_def) <- " vector[nresp] sigma[N];\n" str_add(out$model_comp_mvjoin) <- glue( " sigma[n] = {stan_vector(sigma)};\n" ) if (family == "gaussian") { str_add(out$model_def) <- glue( " // cholesky factor of residual covariance matrix\n", " matrix[nresp, nresp] LSigma[N];\n" ) str_add(out$model_comp_mvjoin) <- glue( " LSigma[n] = diag_pre_multiply(sigma[n], Lrescor);\n" ) } else if (family == "student") { str_add(out$model_def) <- glue( " // residual covariance matrix\n", " matrix[nresp, nresp] Sigma[N];\n" ) str_add(out$model_comp_mvjoin) <- glue( " Sigma[n] = multiply_lower_tri_self_transpose(", "diag_pre_multiply(sigma[n], Lrescor));\n" ) } } else { str_add(out$model_def) <- glue( " vector[nresp] sigma = {stan_vector(sigma)};\n" ) if (family == "gaussian") { str_add(out$model_def) <- glue( " // cholesky factor of residual covariance matrix\n", " matrix[nresp, nresp] LSigma = ", "diag_pre_multiply(sigma, Lrescor);\n" ) } else if (family == "student") { str_add(out$model_def) <- glue( " // residual covariance matrix\n", " matrix[nresp, nresp] Sigma = ", "multiply_lower_tri_self_transpose(", "diag_pre_multiply(sigma, Lrescor));\n" ) } } str_add(out$gen_def) <- glue( " // residual correlations\n", " 
corr_matrix[nresp] Rescor", " = multiply_lower_tri_self_transpose(Lrescor);\n", " vector[nrescor] rescor;\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp("rescor", "nresp") out$model_comp_mvjoin <- paste0( " // combine univariate parameters\n", " for (n in 1:N) {\n", stan_nn_def(threads), out$model_comp_mvjoin, " }\n" ) if (isTRUE(nzchar(out$model_no_pll_comp_mvjoin))) { out$model_no_pll_comp_mvjoin <- paste0( " // combine univariate parameters\n", " for (n in 1:N) {\n", out$model_no_pll_comp_mvjoin, " }\n" ) } out$model_log_lik <- stan_log_lik( x, threads = threads, normalize = normalize, ... ) list(out) } # Stan code for population-level effects stan_fe <- function(bterms, data, prior, stanvars, threads, primitive, normalize, ...) { out <- list() family <- bterms$family fixef <- colnames(data_fe(bterms, data)$X) sparse <- is_sparse(bterms$fe) decomp <- get_decomp(bterms$fe) if (length(fixef) < 2L) { # decompositions require at least two predictors decomp <- "none" } center_X <- stan_center_X(bterms) ct <- str_if(center_X, "c") # remove the intercept from the design matrix? if (center_X) { fixef <- setdiff(fixef, "Intercept") } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) if (length(fixef)) { str_add(out$data) <- glue( " int K{p};", " // number of population-level effects\n", " matrix[N{resp}, K{p}] X{p};", " // population-level design matrix\n" ) if (decomp == "none") { str_add(out$pll_args) <- glue(", data matrix X{ct}{p}") } if (sparse) { if (decomp != "none") { stop2("Cannot use ", decomp, " decomposition for sparse matrices.") } if (use_threading(threads)) { stop2("Cannot use threading and sparse matrices at the same time.") } str_add(out$tdata_def) <- glue( " // sparse matrix representation of X{p}\n", " vector[rows(csr_extract_w(X{p}))] wX{p}", " = csr_extract_w(X{p});\n", " int vX{p}[size(csr_extract_v(X{p}))]", " = csr_extract_v(X{p});\n", " int uX{p}[size(csr_extract_u(X{p}))]", " = csr_extract_u(X{p});\n" ) } # prepare population-level coefficients b_bound <- get_bound(prior, class = "b", px = px) b_type <- glue("vector{b_bound}[K{ct}{p}]") b_coef_type <- glue("real{b_bound}") assign_b_tpar <- stan_assign_b_tpar(bterms, prior) if (decomp == "none") { b_suffix <- "" b_comment <- "population-level effects" if (assign_b_tpar) { str_add(out$tpar_def) <- glue(" {b_type} b{p}; // {b_comment}\n") str_add(out$pll_args) <- glue(", vector b{p}") } else { str_add_list(out) <- stan_prior( prior, class = "b", coef = fixef, type = b_type, coef_type = b_coef_type, px = px, suffix = p, header_type = "vector", comment = b_comment, normalize = normalize ) } } else { stopifnot(decomp == "QR") if (nzchar(b_bound)) { stop2("Cannot impose bounds on decomposed coefficients.") } b_suffix <- "Q" b_comment <- "regression coefficients at QR scale" if (assign_b_tpar) { str_add(out$tpar_def) <- glue(" {b_type} bQ{p}; // {b_comment}\n") str_add(out$pll_args) <- glue(", vector bQ{p}") } else { str_add_list(out) <- stan_prior( prior, class = "b", coef = fixef, type = b_type, coef_type = b_coef_type, px = px, suffix = glue("Q{p}"), header_type = "vector", comment = b_comment, normalize = normalize ) } str_add(out$gen_def) <- glue( " // obtain the actual coefficients\n", " vector[K{ct}{p}] b{p} = XR{p}_inv * bQ{p};\n" ) } str_add_list(out) <- stan_special_prior_local( prior, class = "b", ncoef = length(fixef), px = px, center_X = center_X, suffix = b_suffix, normalize = normalize ) } order_intercepts <- order_intercepts(bterms) if (order_intercepts && !center_X) { stop2( 
"Identifying mixture components via ordering requires ", "population-level intercepts to be present.\n", "Try setting order = 'none' in function 'mixture'." ) } if (center_X) { # centering the design matrix improves convergence sub_X_means <- "" if (length(fixef)) { sub_X_means <- glue(" - dot_product(means_X{p}, b{p})") if (is_ordinal(family)) { # the intercept was already removed during the data preparation str_add(out$tdata_def) <- glue( " int Kc{p} = K{p};\n", " matrix[N{resp}, Kc{p}] Xc{p};", " // centered version of X{p}\n", " vector[Kc{p}] means_X{p};", " // column means of X{p} before centering\n" ) str_add(out$tdata_comp) <- glue( " for (i in 1:K{p}) {{\n", " means_X{p}[i] = mean(X{p}[, i]);\n", " Xc{p}[, i] = X{p}[, i] - means_X{p}[i];\n", " }}\n" ) } else { str_add(out$tdata_def) <- glue( " int Kc{p} = K{p} - 1;\n", " matrix[N{resp}, Kc{p}] Xc{p};", " // centered version of X{p} without an intercept\n", " vector[Kc{p}] means_X{p};", " // column means of X{p} before centering\n" ) str_add(out$tdata_comp) <- glue( " for (i in 2:K{p}) {{\n", " means_X{p}[i - 1] = mean(X{p}[, i]);\n", " Xc{p}[, i - 1] = X{p}[, i] - means_X{p}[i - 1];\n", " }}\n" ) } } if (!is_ordinal(family)) { # intercepts of ordinal models are handled in 'stan_thres' intercept_type <- "real" if (order_intercepts) { # identify mixtures via ordering of the intercepts dp_id <- dpar_id(px$dpar) str_add(out$tpar_def) <- glue( " // identify mixtures via ordering of the intercepts\n", " real Intercept{p} = ordered_Intercept{resp}[{dp_id}];\n" ) str_add(out$pll_args) <- glue(", real Intercept{p}") # intercept parameter needs to be defined outside of 'stan_prior' intercept_type <- "" } str_add(out$eta) <- glue(" + Intercept{p}") str_add(out$gen_def) <- glue( " // actual population-level intercept\n", " real b{p}_Intercept = Intercept{p}{sub_X_means};\n" ) str_add_list(out) <- stan_prior( prior, class = "Intercept", type = intercept_type, suffix = p, px = px, header_type = "real", comment = "temporary intercept for centered predictors", normalize = normalize ) } } if (decomp == "QR") { str_add(out$tdata_def) <- glue( " // matrices for QR decomposition\n", " matrix[N{resp}, K{ct}{p}] XQ{p};\n", " matrix[K{ct}{p}, K{ct}{p}] XR{p};\n", " matrix[K{ct}{p}, K{ct}{p}] XR{p}_inv;\n" ) str_add(out$tdata_comp) <- glue( " // compute and scale QR decomposition\n", " XQ{p} = qr_thin_Q(X{ct}{p}) * sqrt(N{resp} - 1);\n", " XR{p} = qr_thin_R(X{ct}{p}) / sqrt(N{resp} - 1);\n", " XR{p}_inv = inverse(XR{p});\n" ) str_add(out$pll_args) <- glue(", data matrix XQ{p}") } str_add(out$eta) <- stan_eta_fe(fixef, bterms, threads, primitive) out } # Stan code for group-level effects stan_re <- function(ranef, prior, normalize, ...) 
{ lpdf <- ifelse(normalize, "lpdf", "lupdf") IDs <- unique(ranef$id) out <- list() # special handling of student-t group effects as their 'df' parameters # are defined on a per-group basis instead of a per-ID basis tranef <- get_dist_groups(ranef, "student") if (has_rows(tranef)) { str_add(out$par) <- " // parameters for student-t distributed group-level effects\n" for (i in seq_rows(tranef)) { g <- usc(tranef$ggn[i]) id <- tranef$id[i] str_add_list(out) <- stan_prior( prior, class = "df", group = tranef$group[i], type = "real", suffix = g, normalize = normalize ) str_add(out$par) <- glue( " vector[N_{id}] udf{g};\n" ) str_add(out$prior) <- glue( " target += inv_chi_square_{lpdf}(udf{g} | df{g});\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " vector[N_{id}] dfm{g};\n" ) str_add(out$tpar_comp) <- glue( " dfm{g} = sqrt(df{g} * udf{g});\n" ) } } # the ID syntax requires group-level effects to be evaluated separately tmp <- lapply(IDs, .stan_re, ranef = ranef, prior = prior, normalize = normalize, ...) out <- collapse_lists(ls = c(list(out), tmp)) out } # Stan code for group-level effects per ID # @param id the ID of the grouping factor # @param ranef output of tidy_ranef # @param prior object of class brmsprior .stan_re <- function(id, ranef, prior, threads, normalize, ...) { lpdf <- ifelse(normalize, "lpdf", "lupdf") out <- list() r <- subset2(ranef, id = id) has_cov <- nzchar(r$cov[1]) has_by <- nzchar(r$by[[1]]) Nby <- seq_along(r$bylevels[[1]]) ng <- seq_along(r$gcall[[1]]$groups) px <- check_prefix(r) uresp <- usc(unique(px$resp)) idp <- paste0(r$id, usc(combine_prefix(px))) # define data needed for group-level effects str_add(out$data) <- glue( " // data for group-level effects of ID {id}\n", " int N_{id}; // number of grouping levels\n", " int M_{id}; // number of coefficients per level\n" ) if (r$gtype[1] == "mm") { for (res in uresp) { str_add(out$data) <- cglue( " int J_{id}{res}_{ng}[N{res}];", " // grouping indicator per observation\n", " real W_{id}{res}_{ng}[N{res}];", " // multi-membership weights\n" ) str_add(out$pll_args) <- cglue( ", data int[] J_{id}{res}_{ng}, data real[] W_{id}{res}_{ng}" ) } } else { str_add(out$data) <- cglue( " int J_{id}{uresp}[N{uresp}];", " // grouping indicator per observation\n" ) str_add(out$pll_args) <- cglue( ", data int[] J_{id}{uresp}" ) } if (has_by) { str_add(out$data) <- glue( " int Nby_{id}; // number of by-factor levels\n", " int Jby_{id}[N_{id}];", " // by-factor indicator per observation\n" ) } if (has_cov) { str_add(out$data) <- glue( " matrix[N_{id}, N_{id}] Lcov_{id};", " // cholesky factor of known covariance matrix\n" ) } J <- seq_rows(r) reqZ <- !r$type %in% "sp" if (any(reqZ)) { str_add(out$data) <- " // group-level predictor values\n" if (r$gtype[1] == "mm") { for (i in which(reqZ)) { str_add(out$data) <- cglue( " vector[N{usc(r$resp[i])}] Z_{idp[i]}_{r$cn[i]}_{ng};\n" ) str_add(out$pll_args) <- cglue( ", data vector Z_{idp[i]}_{r$cn[i]}_{ng}" ) } } else { str_add(out$data) <- cglue( " vector[N{usc(r$resp[reqZ])}] Z_{idp[reqZ]}_{r$cn[reqZ]};\n" ) str_add(out$pll_args) <- cglue( ", data vector Z_{idp[reqZ]}_{r$cn[reqZ]}" ) } } # define standard deviation parameters if (has_by) { str_add_list(out) <- stan_prior( prior, class = "sd", group = r$group[1], coef = r$coef, type = glue("matrix[M_{id}, Nby_{id}]"), coef_type = glue("row_vector[Nby_{id}]"), suffix = glue("_{id}"), px = px, broadcast = "matrix", comment = "group-level standard deviations", normalize = normalize ) } 
else { str_add_list(out) <- stan_prior( prior, class = "sd", group = r$group[1], coef = r$coef, type = glue("vector[M_{id}]"), coef_type = "real", suffix = glue("_{id}"), px = px, comment = "group-level standard deviations", normalize = normalize ) } dfm <- "" tr <- get_dist_groups(r, "student") if (nrow(r) > 1L && r$cor[1]) { # multiple correlated group-level effects str_add(out$data) <- glue( " int NC_{id}; // number of group-level correlations\n" ) str_add(out$par) <- glue( " matrix[M_{id}, N_{id}] z_{id};", " // standardized group-level effects\n" ) str_add(out$prior) <- glue( " target += std_normal_{lpdf}(to_vector(z_{id}));\n" ) if (has_rows(tr)) { dfm <- glue("rep_matrix(dfm_{tr$ggn[1]}, M_{id}) .* ") } if (has_by) { str_add_list(out) <- stan_prior( prior, class = "L", group = r$group[1], coef = Nby, type = glue("cholesky_factor_corr[M_{id}]"), coef_type = glue("cholesky_factor_corr[M_{id}]"), suffix = glue("_{id}"), dim = glue("[Nby_{id}]"), comment = "cholesky factor of correlation matrix", normalize = normalize ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " matrix[N_{id}, M_{id}] r_{id}; // actual group-level effects\n" ) if (has_cov) { rdef <- glue( "scale_r_cor_by_cov(z_{id}, sd_{id}, L_{id}, Jby_{id}, Lcov_{id})" ) } else { rdef <- glue("scale_r_cor_by(z_{id}, sd_{id}, L_{id}, Jby_{id})") } str_add(out$tpar_comp) <- glue( " // compute actual group-level effects\n", " r_{id} = {dfm}{rdef};\n" ) str_add(out$gen_def) <- cglue( " // compute group-level correlations\n", " corr_matrix[M_{id}] Cor_{id}_{Nby}", " = multiply_lower_tri_self_transpose(L_{id}[{Nby}]);\n", " vector[NC_{id}] cor_{id}_{Nby};\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp( glue("cor_{id}_{Nby}"), glue("M_{id}") ) } else { str_add_list(out) <- stan_prior( prior, class = "L", group = r$group[1], suffix = usc(id), type = glue("cholesky_factor_corr[M_{id}]"), comment = "cholesky factor of correlation matrix", normalize = normalize ) if (has_cov) { rdef <- glue("scale_r_cor_cov(z_{id}, sd_{id}, L_{id}, Lcov_{id})") } else { rdef <- glue("scale_r_cor(z_{id}, sd_{id}, L_{id})") } # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " matrix[N_{id}, M_{id}] r_{id}; // actual group-level effects\n" ) str_add(out$tpar_comp) <- glue( " // compute actual group-level effects\n", " r_{id} = {dfm}{rdef};\n" ) str_add(out$gen_def) <- glue( " // compute group-level correlations\n", " corr_matrix[M_{id}] Cor_{id}", " = multiply_lower_tri_self_transpose(L_{id});\n", " vector[NC_{id}] cor_{id};\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp( cor = glue("cor_{id}"), ncol = glue("M_{id}") ) } # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- " // using vectors speeds up indexing in loops\n" str_add(out$tpar_def) <- cglue( " vector[N_{id}] r_{idp}_{r$cn};\n" ) str_add(out$tpar_comp) <- cglue( " r_{idp}_{r$cn} = r_{id}[, {J}];\n" ) str_add(out$pll_args) <- cglue( ", vector r_{idp}_{r$cn}" ) } else { # single or uncorrelated group-level effects str_add(out$par) <- glue( " vector[N_{id}] z_{id}[M_{id}];", " // standardized group-level effects\n" ) str_add(out$prior) <- cglue( " target += std_normal_{lpdf}(z_{id}[{seq_rows(r)}]);\n" ) Lcov <- str_if(has_cov, glue("Lcov_{id} * ")) if (has_rows(tr)) { dfm <- glue("dfm_{tr$ggn[1]} .* ") } if (has_by) { # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- cglue( " vector[N_{id}] r_{idp}_{r$cn}; // actual 
group-level effects\n" ) str_add(out$tpar_comp) <- cglue( " r_{idp}_{r$cn} = {dfm}(transpose(sd_{id}[{J}, Jby_{id}])", " .* ({Lcov}z_{id}[{J}]));\n" ) } else { # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- cglue( " vector[N_{id}] r_{idp}_{r$cn}; // actual group-level effects\n" ) str_add(out$tpar_comp) <- cglue( " r_{idp}_{r$cn} = {dfm}(sd_{id}[{J}] * ({Lcov}z_{id}[{J}]));\n" ) } str_add(out$pll_args) <- cglue( ", vector r_{idp}_{r$cn}" ) } out } # Stan code of smooth terms stan_sm <- function(bterms, data, prior, threads, normalize, ...) { lpdf <- ifelse(normalize, "lpdf", "lupdf") out <- list() smef <- tidy_smef(bterms, data) if (!NROW(smef)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) slice <- stan_slice(threads) Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { str_add(out$data) <- glue( " // data for splines\n", " int Ks{p}; // number of linear effects\n", " matrix[N{resp}, Ks{p}] Xs{p};", " // design matrix for the linear effects\n" ) str_add(out$pll_args) <- glue(", data matrix Xs{p}") str_add_list(out) <- stan_prior( prior, class = "b", coef = Xs_names, type = glue("vector[Ks{p}]"), suffix = glue("s{p}"), header_type = "vector", px = px, comment = "spline coefficients", normalize = normalize ) str_add(out$eta) <- glue(" + Xs{p}{slice} * bs{p}") } for (i in seq_rows(smef)) { pi <- glue("{p}_{i}") nb <- seq_len(smef$nbases[[i]]) str_add(out$data) <- glue( " // data for spline {smef$byterm[i]}\n", " int nb{pi}; // number of bases\n", " int knots{pi}[nb{pi}]; // number of knots\n" ) str_add(out$data) <- " // basis function matrices\n" str_add(out$data) <- cglue( " matrix[N{resp}, knots{pi}[{nb}]] Zs{pi}_{nb};\n" ) str_add(out$pll_args) <- cglue(", data matrix Zs{pi}_{nb}") str_add(out$par) <- glue( " // parameters for spline {smef$byterm[i]}\n" ) str_add(out$par) <- cglue( " // standarized spline coefficients\n", " vector[knots{pi}[{nb}]] zs{pi}_{nb};\n" ) for (j in nb) { str_add_list(out) <- stan_prior( prior, class = "sds", coef = smef$term[i], type = "real", coef_type = "real", suffix = glue("{pi}_{j}"), px = px, comment = "standard deviations of spline coefficients", normalize = normalize ) } # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- cglue( " // actual spline coefficients\n", " vector[knots{pi}[{nb}]] s{pi}_{nb};\n" ) str_add(out$tpar_comp) <- cglue( " // compute actual spline coefficients\n", " s{pi}_{nb} = sds{pi}_{nb} * zs{pi}_{nb};\n" ) str_add(out$pll_args) <- cglue(", vector s{pi}_{nb}") str_add(out$prior) <- cglue( " target += std_normal_{lpdf}(zs{pi}_{nb});\n" ) str_add(out$eta) <- cglue( " + Zs{pi}_{nb}{slice} * s{pi}_{nb}" ) } out } # Stan code for category specific effects # @note not implemented for non-linear models stan_cs <- function(bterms, data, prior, ranef, threads, normalize, ...) 
{ out <- list() csef <- colnames(get_model_matrix(bterms$cs, data)) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(bterms$resp) slice <- stan_slice(threads) ranef <- subset2(ranef, type = "cs", ls = px) if (length(csef)) { str_add(out$data) <- glue( " int Kcs{p}; // number of category specific effects\n", " matrix[N{resp}, Kcs{p}] Xcs{p}; // category specific design matrix\n" ) str_add(out$pll_args) <- glue(", data matrix Xcs{p}") bound <- get_bound(prior, class = "b", px = px) str_add_list(out) <- stan_prior( prior, class = "b", coef = csef, type = glue("matrix{bound}[Kcs{p}, nthres{resp}]"), coef_type = glue("row_vector{bound}[nthres{resp}]"), suffix = glue("cs{p}"), px = px, broadcast = "matrix", header_type = "matrix", comment = "category specific effects", normalize = normalize ) str_add(out$model_def) <- glue( " // linear predictor for category specific effects\n", " matrix[N{resp}, nthres{resp}] mucs{p} = Xcs{p}{slice} * bcs{p};\n" ) } if (nrow(ranef)) { if (!length(csef)) { # only group-level category specific effects present str_add(out$model_def) <- glue( " // linear predictor for category specific effects\n", " matrix[N{resp}, nthres{resp}] mucs{p}", " = rep_matrix(0, N{resp}, nthres{resp});\n" ) } n <- stan_nn(threads) thres_regex <- "(?<=\\[)[[:digit:]]+(?=\\]$)" thres <- get_matches(thres_regex, ranef$coef, perl = TRUE) nthres <- max(as.numeric(thres)) mucs_loop <- "" for (i in seq_len(nthres)) { r_cat <- ranef[grepl(glue("\\[{i}\\]$"), ranef$coef), ] str_add(mucs_loop) <- glue( " mucs{p}[n, {i}] = mucs{p}[n, {i}]" ) for (id in unique(r_cat$id)) { r <- r_cat[r_cat$id == id, ] rpx <- check_prefix(r) idp <- paste0(r$id, usc(combine_prefix(rpx))) idresp <- paste0(r$id, usc(rpx$resp)) str_add(mucs_loop) <- cglue( " + r_{idp}_{r$cn}[J_{idresp}{n}] * Z_{idp}_{r$cn}{n}" ) } str_add(mucs_loop) <- ";\n" } str_add(out$model_comp_eta_loop) <- glue( " for (n in 1:N{resp}) {{\n", stan_nn_def(threads), mucs_loop, " }\n" ) } out } # Stan code for special effects stan_sp <- function(bterms, data, prior, stanvars, ranef, meef, threads, normalize, ...) 
{ out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) return(out) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) n <- stan_nn(threads) ranef <- subset2(ranef, type = "sp", ls = px) spef_coef <- rename(spef$term) invalid_coef <- setdiff(ranef$coef, spef_coef) if (length(invalid_coef)) { stop2( "Special group-level terms require corresponding ", "population-level terms:\nOccured for ", collapse_comma(invalid_coef) ) } # prepare Stan code of the linear predictor component for (i in seq_rows(spef)) { eta <- spef$joint_call[[i]] if (!is.null(spef$calls_mo[[i]])) { new_mo <- glue("mo(simo{p}_{spef$Imo[[i]]}, Xmo{p}_{spef$Imo[[i]]}{n})") eta <- rename(eta, spef$calls_mo[[i]], new_mo) } if (!is.null(spef$calls_me[[i]])) { Kme <- seq_along(meef$term) Ime <- match(meef$grname, unique(meef$grname)) nme <- ifelse(nzchar(meef$grname), glue("[Jme_{Ime}{n}]"), n) new_me <- glue("Xme_{Kme}{nme}") eta <- rename(eta, meef$term, new_me) } if (!is.null(spef$calls_mi[[i]])) { is_na_idx <- is.na(spef$idx2_mi[[i]]) idx_mi <- glue("[idxl{p}_{spef$vars_mi[[i]]}_{spef$idx2_mi[[i]]}{n}]") idx_mi <- ifelse(is_na_idx, n, idx_mi) new_mi <- glue("Yl_{spef$vars_mi[[i]]}{idx_mi}") eta <- rename(eta, spef$calls_mi[[i]], new_mi) str_add(out$pll_args) <- glue(", vector Yl_{spef$vars_mi[[i]]}") } if (spef$Ic[i] > 0) { str_add(eta) <- glue(" * Csp{p}_{spef$Ic[i]}{n}") } r <- subset2(ranef, coef = spef_coef[i]) rpars <- str_if(nrow(r), cglue(" + {stan_eta_rsp(r)}")) str_add(out$loopeta) <- glue(" + (bsp{p}[{i}]{rpars}) * {eta}") } # prepare general Stan code ncovars <- max(spef$Ic) str_add(out$data) <- glue( " int Ksp{p}; // number of special effects terms\n" ) if (ncovars > 0L) { str_add(out$data) <- " // covariates of special effects terms\n" str_add(out$data) <- cglue( " vector[N{resp}] Csp{p}_{seq_len(ncovars)};\n" ) str_add(out$pll_args) <- cglue(", data vector Csp{p}_{seq_len(ncovars)}") } # include special Stan code for monotonic effects which_Imo <- which(lengths(spef$Imo) > 0) if (any(which_Imo)) { str_add(out$data) <- glue( " int Imo{p}; // number of monotonic variables\n", " int Jmo{p}[Imo{p}]; // length of simplexes\n" ) ids <- unlist(spef$ids_mo) lpdf <- stan_lpdf_name(normalize) for (i in which_Imo) { for (k in seq_along(spef$Imo[[i]])) { j <- spef$Imo[[i]][[k]] id <- spef$ids_mo[[i]][[k]] # index of first ID appearance j_id <- match(id, ids) str_add(out$data) <- glue( " int Xmo{p}_{j}[N{resp}]; // monotonic variable\n" ) str_add(out$pll_args) <- glue( ", int[] Xmo{p}_{j}, vector simo{p}_{j}" ) if (is.na(id) || j_id == j) { # no ID or first appearance of the ID str_add(out$data) <- glue( " vector[Jmo{p}[{j}]] con_simo{p}_{j};", " // prior concentration of monotonic simplex\n" ) str_add(out$par) <- glue( " simplex[Jmo{p}[{j}]] simo{p}_{j}; // monotonic simplex\n" ) str_add(out$prior) <- glue( " target += dirichlet_{lpdf}(simo{p}_{j} | con_simo{p}_{j});\n" ) } else { # use the simplex shared across all terms of the same ID str_add(out$tpar_def) <- glue( " simplex[Jmo{p}[{j}]] simo{p}_{j} = simo{p}_{j_id};\n" ) } } } } # include special Stan code for missing value terms uni_mi <- na.omit(attr(spef, "uni_mi")) for (j in seq_rows(uni_mi)) { idxl <- glue("idxl{p}_{uni_mi$var[j]}_{uni_mi$idx2[j]}") str_add(out$data) <- glue( " int {idxl}[N{resp}]; // matching indices\n" ) str_add(out$pll_args) <- glue(", data int[] {idxl}") } # prepare special effects coefficients bound <- get_bound(prior, class = "b", px = px) if (stan_assign_b_tpar(bterms, prior)) { str_add(out$tpar_def) <- 
glue( " // special effects coefficients\n", " vector{bound}[Ksp{p}] bsp{p};\n" ) } else { str_add_list(out) <- stan_prior( prior, class = "b", coef = spef$coef, type = glue("vector{bound}[Ksp{p}]"), coef_type = glue("real{bound}"), px = px, suffix = glue("sp{p}"), header_type = "vector", comment = "special effects coefficients", normalize = normalize ) } stan_special_priors <- stan_special_prior_local( prior, class = "bsp", ncoef = nrow(spef), px = px, center_X = FALSE, normalize = normalize ) out <- collapse_lists(out, stan_special_priors) out } # Stan code for latent gaussian processes stan_gp <- function(bterms, data, prior, threads, normalize, ...) { lpdf <- stan_lpdf_name(normalize) out <- list() px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) slice <- stan_slice(threads) gpef <- tidy_gpef(bterms, data) # kernel methods cannot simply be split up into partial sums for (i in seq_rows(gpef)) { pi <- glue("{p}_{i}") byvar <- gpef$byvars[[i]] cons <- gpef$cons[[i]] byfac <- length(cons) > 0L bynum <- !is.null(byvar) && !byfac k <- gpef$k[i] is_approx <- !isNA(k) iso <- gpef$iso[i] gr <- gpef$gr[i] sfx1 <- gpef$sfx1[[i]] sfx2 <- gpef$sfx2[[i]] str_add(out$data) <- glue( " // data related to GPs\n", " int Kgp{pi};", " // number of sub-GPs (equal to 1 unless 'by' was used)\n", " int Dgp{pi}; // GP dimension\n" ) if (is_approx) { str_add(out$data) <- glue( " // number of basis functions of an approximate GP\n", " int NBgp{pi};\n" ) } str_add_list(out) <- stan_prior( prior, class = "sdgp", coef = sfx1, type = glue("vector[Kgp{pi}]"), coef_type = "real", px = px, suffix = pi, comment = "GP standard deviation parameters", normalize = normalize ) if (gpef$iso[i]) { lscale_type <- "vector[1]" lscale_dim <- glue("[Kgp{pi}]") lscale_comment <- "GP length-scale parameters" } else { lscale_type <- glue("vector[Dgp{pi}]") lscale_dim <- glue("[Kgp{pi}]") lscale_comment <- "GP length-scale parameters" } if (byfac) { J <- seq_along(cons) Ngp <- glue("Ngp{pi}") Nsubgp <- glue("N", str_if(gr, "sub"), glue("gp{pi}")) Igp <- glue("Igp{pi}_{J}") str_add(out$data) <- glue( " // number of observations relevant for a certain sub-GP\n", " int {Ngp}[Kgp{pi}];\n" ) str_add(out$data) <- " // indices and contrasts of sub-GPs per observation\n" str_add(out$data) <- cglue( " int {Igp}[{Ngp}[{J}]];\n", " vector[{Ngp}[{J}]] Cgp{pi}_{J};\n" ) str_add(out$pll_args) <- cglue( ", data int[] {Igp}, data vector Cgp{pi}_{J}" ) str_add_list(out) <- stan_prior( prior, class = "lscale", coef = sfx2, type = lscale_type, coef_type = "real", dim = lscale_dim, suffix = glue("{pi}"), px = px, comment = lscale_comment, normalize = normalize ) if (gr) { str_add(out$data) <- glue( " // number of latent GP groups\n", " int Nsubgp{pi}[Kgp{pi}];\n" ) str_add(out$data) <- cglue( " // indices of latent GP groups per observation\n", " int Jgp{pi}_{J}[{Ngp}[{J}]];\n" ) str_add(out$pll_args) <- cglue(", data int[] Jgp{pi}_{J}") } if (is_approx) { str_add(out$data) <- " // approximate GP basis matrices and eigenvalues\n" str_add(out$data) <- cglue( " matrix[{Nsubgp}[{J}], NBgp{pi}] Xgp{pi}_{J};\n", " vector[Dgp{pi}] slambda{pi}_{J}[NBgp{pi}];\n" ) str_add(out$par) <- " // latent variables of the GP\n" str_add(out$par) <- cglue( " vector[NBgp{pi}] zgp{pi}_{J};\n" ) str_add(out$model_no_pll_def) <- " // scale latent variables of the GP\n" str_add(out$model_no_pll_def) <- cglue( " vector[NBgp{pi}] rgp{pi}_{J} = sqrt(spd_cov_exp_quad(", "slambda{pi}_{J}, sdgp{pi}[{J}], lscale{pi}[{J}])) .* zgp{pi}_{J};\n" ) gp_call <- 
glue("Xgp{pi}_{J} * rgp{pi}_{J}") } else { # exact GPs str_add(out$data) <- " // covariates of the GP\n" str_add(out$data) <- cglue( " vector[Dgp{pi}] Xgp{pi}_{J}[{Nsubgp}[{J}]];\n" ) str_add(out$par) <- " // latent variables of the GP\n" str_add(out$par) <- cglue( " vector[{Nsubgp}[{J}]] zgp{pi}_{J};\n" ) gp_call <- glue( "gp(Xgp{pi}_{J}, sdgp{pi}[{J}], lscale{pi}[{J}], zgp{pi}_{J})" ) } slice2 <- "" Igp_sub <- Igp if (use_threading(threads)) { str_add(out$model_comp_basic) <- cglue( " int which_gp{pi}_{J}[size_range({Igp}, start, end)] =", " which_range({Igp}, start, end);\n" ) slice2 <- glue("[which_gp{pi}_{J}]") Igp_sub <- glue("start_at_one({Igp}{slice2}, start)") } # TODO: add all GP elements to 'eta' at the same time? eta <- combine_prefix(px, keep_mu = TRUE, nlp = TRUE) eta <- glue("{eta}[{Igp_sub}]") str_add(out$model_no_pll_def) <- cglue( " vector[{Nsubgp}[{J}]] gp_pred{pi}_{J} = {gp_call};\n" ) str_add(out$pll_args) <- cglue(", vector gp_pred{pi}_{J}") Cgp <- glue("Cgp{pi}_{J}{slice2} .* ") Jgp <- str_if(gr, glue("[Jgp{pi}_{J}{slice2}]"), slice) str_add(out$model_comp_basic) <- cglue( " {eta} += {Cgp}gp_pred{pi}_{J}{Jgp};\n" ) str_add(out$prior) <- cglue( "{tp()}std_normal_{lpdf}(zgp{pi}_{J});\n" ) } else { # no by-factor variable str_add_list(out) <- stan_prior( prior, class = "lscale", coef = sfx2, type = lscale_type, coef_type = "real", dim = lscale_dim, suffix = glue("{pi}"), px = px, comment = lscale_comment, normalize = normalize ) Nsubgp <- glue("N{resp}") if (gr) { Nsubgp <- glue("Nsubgp{pi}") str_add(out$data) <- glue( " // number of latent GP groups\n", " int {Nsubgp};\n", " // indices of latent GP groups per observation\n", " int Jgp{pi}[N{resp}];\n" ) str_add(out$pll_args) <- glue(", data int[] Jgp{pi}") } Cgp <- "" if (bynum) { str_add(out$data) <- glue( " // numeric by-variable of the GP\n", " vector[N{resp}] Cgp{pi};\n" ) str_add(out$pll_args) <- glue(", data vector Cgp{pi}") Cgp <- glue("Cgp{pi}{slice} .* ") } if (is_approx) { str_add(out$data) <- glue( " // approximate GP basis matrices\n", " matrix[{Nsubgp}, NBgp{pi}] Xgp{pi};\n", " // approximate GP eigenvalues\n", " vector[Dgp{pi}] slambda{pi}[NBgp{pi}];\n" ) str_add(out$par) <- glue( " vector[NBgp{pi}] zgp{pi}; // latent variables of the GP\n" ) str_add(out$model_no_pll_def) <- glue( " // scale latent variables of the GP\n", " vector[NBgp{pi}] rgp{pi} = sqrt(spd_cov_exp_quad(", "slambda{pi}, sdgp{pi}[1], lscale{pi}[1])) .* zgp{pi};\n" ) if (gr) { # grouping prevents GPs to be computed efficiently inside reduce_sum str_add(out$model_no_pll_def) <- glue( " vector[{Nsubgp}] gp_pred{pi} = Xgp{pi} * rgp{pi};\n" ) str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}[Jgp{pi}{slice}]") str_add(out$pll_args) <- glue(", vector gp_pred{pi}") } else { # efficient computation of approx GPs inside reduce_sum is possible str_add(out$model_def) <- glue( " vector[N{resp}] gp_pred{pi} = Xgp{pi}{slice} * rgp{pi};\n" ) str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}") str_add(out$pll_args) <- glue(", data matrix Xgp{pi}, vector rgp{pi}") } } else { # exact GPs str_add(out$data) <- glue( " vector[Dgp{pi}] Xgp{pi}[{Nsubgp}]; // covariates of the GP\n" ) str_add(out$par) <- glue( " vector[{Nsubgp}] zgp{pi}; // latent variables of the GP\n" ) gp_call <- glue("gp(Xgp{pi}, sdgp{pi}[1], lscale{pi}[1], zgp{pi})") # exact GPs are kernel based methods which # need to be computed outside of reduce_sum str_add(out$model_no_pll_def) <- glue( " vector[{Nsubgp}] gp_pred{pi} = {gp_call};\n" ) Jgp <- str_if(gr, glue("[Jgp{pi}{slice}]"), slice) 
str_add(out$eta) <- glue(" + {Cgp}gp_pred{pi}{Jgp}") str_add(out$pll_args) <- glue(", vector gp_pred{pi}") } str_add(out$prior) <- glue( "{tp()}std_normal_{lpdf}(zgp{pi});\n" ) } } out } # Stan code for the linear predictor of autocorrelation terms stan_ac <- function(bterms, data, prior, threads, normalize, ...) { lpdf <- stan_lpdf_name(normalize) out <- list() px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) n <- stan_nn(threads) slice <- stan_slice(threads) has_natural_residuals <- has_natural_residuals(bterms) has_ac_latent_residuals <- has_ac_latent_residuals(bterms) acef <- tidy_acef(bterms, data) if (has_ac_latent_residuals) { # families that do not have natural residuals require latent # residuals for residual-based autocor structures err_msg <- "Latent residuals are not implemented" if (is.btnl(bterms)) { stop2(err_msg, " for non-linear models.") } str_add(out$par) <- glue( " vector[N{resp}] zerr{p}; // unscaled residuals\n" ) str_add_list(out) <- stan_prior( prior, class = "sderr", px = px, suffix = p, type = "real", comment = "SD of residuals", normalize = normalize ) str_add(out$tpar_def) <- glue( " vector[N{resp}] err{p}; // actual residuals\n" ) str_add(out$pll_args) <- glue(", vector err{p}") str_add(out$prior) <- glue( " target += std_normal_{lpdf}(zerr{p});\n" ) str_add(out$eta) <- glue(" + err{p}{slice}") } # validity of the autocor terms has already been checked in 'tidy_acef' acef_arma <- subset2(acef, class = "arma") if (NROW(acef_arma)) { if (use_threading(threads) && (!acef_arma$cov || has_natural_residuals)) { stop2("Threading is not supported for this ARMA model.") } str_add(out$data) <- glue( " // data needed for ARMA correlations\n", " int Kar{p}; // AR order\n", " int Kma{p}; // MA order\n" ) str_add(out$tdata_def) <- glue( " int max_lag{p} = max(Kar{p}, Kma{p});\n" ) if (!acef_arma$cov) { err_msg <- "Please set cov = TRUE in ARMA structures" if (is.formula(bterms$adforms$se)) { stop2(err_msg, " when including known standard errors.") } str_add(out$data) <- glue( " // number of lags per observation\n", " int J_lag{p}[N{resp}];\n" ) str_add(out$model_def) <- glue( " // matrix storing lagged residuals\n", " matrix[N{resp}, max_lag{p}] Err{p}", " = rep_matrix(0, N{resp}, max_lag{p});\n" ) if (has_natural_residuals) { str_add(out$model_def) <- glue( " vector[N{resp}] err{p}; // actual residuals\n" ) Y <- str_if(is.formula(bterms$adforms$mi), "Yl", "Y") comp_err <- glue(" err{p}[n] = {Y}{p}[n] - mu{p}[n];\n") } else { if (acef_arma$q > 0) { # AR and MA structures cannot be distinguished when # using a single vector of latent residuals stop2("Please set cov = TRUE when modeling MA structures ", "for this family.") } str_add(out$tpar_comp) <- glue( " // compute ctime-series residuals\n", " err{p} = sderr{p} * zerr{p};\n" ) comp_err <- "" } add_ar <- str_if(acef_arma$p > 0, glue(" mu{p}[n] += Err{p}[n, 1:Kar{p}] * ar{p};\n") ) add_ma <- str_if(acef_arma$q > 0, glue(" mu{p}[n] += Err{p}[n, 1:Kma{p}] * ma{p};\n") ) str_add(out$model_comp_arma) <- glue( " // include ARMA terms\n", " for (n in 1:N{resp}) {{\n", add_ma, comp_err, " for (i in 1:J_lag{p}[n]) {{\n", " Err{p}[n + 1, i] = err{p}[n + 1 - i];\n", " }}\n", add_ar, " }}\n" ) } # no boundaries are required in the conditional formulation # when natural residuals automatically define the scale need_arma_bound <- acef_arma$cov || has_ac_latent_residuals if (acef_arma$p > 0) { ar_bound <- str_if(need_arma_bound, "") str_add_list(out) <- stan_prior( prior, class = "ar", px = px, suffix = p, coef 
= seq_along(acef_arma$p), type = glue("vector{ar_bound}[Kar{p}]"), coef_type = glue("real{ar_bound}"), header_type = "vector", comment = "autoregressive coefficients", normalize = normalize ) } if (acef_arma$q > 0) { ma_bound <- str_if(need_arma_bound, "") str_add_list(out) <- stan_prior( prior, class = "ma", px = px, suffix = p, coef = seq_along(acef_arma$q), type = glue("vector{ma_bound}[Kma{p}]"), coef_type = glue("real{ma_bound}"), header_type = "vector", comment = "moving-average coefficients", normalize = normalize ) } } acef_cosy <- subset2(acef, class = "cosy") if (NROW(acef_cosy)) { # compound symmetry correlation structure # most code is shared with ARMA covariance models # cosy correlations may be negative in theory but # this causes problems divergent transitions (#878) # str_add(out$tdata_def) <- glue( # " real lb_cosy{p} = -1.0 / (max(nobs_tg{p}) - 1);", # " // lower bound of the cosy correlation\n" # ) str_add_list(out) <- stan_prior( prior, class = "cosy", px = px, suffix = p, type = glue("real"), comment = "compound symmetry correlation", normalize = normalize ) } acef_time_cov <- subset2(acef, dim = "time", cov = TRUE) if (NROW(acef_time_cov)) { # use correlation structures in covariance matrix parameterization # optional for ARMA models and obligatory for COSY models # can only model one covariance structure at a time stopifnot(NROW(acef_time_cov) == 1) str_add(out$data) <- glue( " // see the functions block for details\n", " int N_tg{p};\n", " int begin_tg{p}[N_tg{p}];\n", " int end_tg{p}[N_tg{p}];\n", " int nobs_tg{p}[N_tg{p}];\n" ) str_add(out$tdata_def) <- glue( " int max_nobs_tg{p} = max(nobs_tg{p});", " // maximum dimension of the autocorrelation matrix\n" ) if (!is.formula(bterms$adforms$se)) { str_add(out$tdata_def) <- glue( " // no known standard errors specified by the user\n", " vector[N{resp}] se2{p} = rep_vector(0.0, N{resp});\n" ) } str_add(out$tpar_def) <- glue( " // cholesky factor of the autocorrelation matrix\n", " matrix[max_nobs_tg{p}, max_nobs_tg{p}] chol_cor{p};\n" ) if (acef_time_cov$class == "arma") { if (acef_time_cov$p > 0 && acef_time_cov$q == 0) { cor_fun <- "ar1" cor_args <- glue("ar{p}[1]") } else if (acef_time_cov$p == 0 && acef_time_cov$q > 0) { cor_fun <- "ma1" cor_args <- glue("ma{p}[1]") } else { cor_fun <- "arma1" cor_args <- glue("ar{p}[1], ma{p}[1]") } } else if (acef_time_cov$class == "cosy") { cor_fun <- "cosy" cor_args <- glue("cosy{p}") } str_add(out$tpar_comp) <- glue( " // compute residual covariance matrix\n", " chol_cor{p} = cholesky_cor_{cor_fun}({cor_args}, max_nobs_tg{p});\n" ) if (has_ac_latent_residuals) { str_add(out$tpar_comp) <- glue( " // compute correlated time-series residuals\n", " err{p} = scale_time_err(", "zerr{p}, sderr{p}, chol_cor{p}, nobs_tg{p}, begin_tg{p}, end_tg{p});\n" ) } } acef_sar <- subset2(acef, class = "sar") if (NROW(acef_sar)) { if (!has_natural_residuals) { stop2("SAR terms are not implemented for this family.") } if (use_threading(threads)) { stop2("Threading is not supported for SAR models.") } str_add(out$data) <- glue( " matrix[N{resp}, N{resp}] Msar{p}; // spatial weight matrix\n", " vector[N{resp}] eigenMsar{p}; // eigenvalues of Msar{p}\n" ) str_add(out$tdata_def) <- glue( " // the eigenvalues define the boundaries of the SAR correlation\n", " real min_eigenMsar{p} = min(eigenMsar{p});\n", " real max_eigenMsar{p} = max(eigenMsar{p});\n" ) if (acef_sar$type == "lag") { str_add_list(out) <- stan_prior( prior, class = "lagsar", px = px, suffix = p, type = glue("real"), comment = "lag-SAR 
correlation parameter", normalize = normalize ) } else if (acef_sar$type == "error") { str_add_list(out) <- stan_prior( prior, class = "errorsar", px = px, suffix = p, type = glue("real"), comment = "error-SAR correlation parameter", normalize = normalize ) } } acef_car <- subset2(acef, class = "car") if (NROW(acef_car)) { if (is.btnl(bterms)) { stop2("CAR terms are not implemented for non-linear models.") } str_add(out$data) <- glue( " // data for the CAR structure\n", " int Nloc{p};\n", " int Jloc{p}[N{resp}];\n", " int Nedges{p};\n", " int edges1{p}[Nedges{p}];\n", " int edges2{p}[Nedges{p}];\n" ) str_add_list(out) <- stan_prior( prior, class = "sdcar", px = px, suffix = p, type = "real", comment = "SD of the CAR structure", normalize = normalize ) str_add(out$pll_args) <- glue(", vector rcar{p}, data int[] Jloc{p}") str_add(out$loopeta) <- glue(" + rcar{p}[Jloc{p}{n}]") if (acef_car$type %in% c("escar", "esicar")) { str_add(out$data) <- glue( " vector[Nloc{p}] Nneigh{p};\n", " vector[Nloc{p}] eigenMcar{p};\n" ) } if (acef_car$type == "escar") { str_add(out$par) <- glue( " vector[Nloc{p}] rcar{p};\n" ) str_add_list(out) <- stan_prior( prior, class = "car", px = px, suffix = p, type = "real", normalize = normalize ) car_args <- c( "car", "sdcar", "Nloc", "Nedges", "Nneigh", "eigenMcar", "edges1", "edges2" ) car_args <- paste0(car_args, p, collapse = ", ") str_add(out$prior) <- glue( " target += sparse_car_lpdf(\n", " rcar{p} | {car_args}\n", " );\n" ) } else if (acef_car$type == "esicar") { str_add(out$par) <- glue( " vector[Nloc{p} - 1] zcar{p};\n" ) str_add(out$tpar_def) <- glue( " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // sum-to-zero constraint\n", " rcar[1:(Nloc{p} - 1)] = zcar{p};\n", " rcar[Nloc{p}] = - sum(zcar{p});\n" ) car_args <- c( "sdcar", "Nloc", "Nedges", "Nneigh", "eigenMcar", "edges1", "edges2" ) car_args <- paste0(car_args, p, collapse = ", ") str_add(out$prior) <- glue( " target += sparse_icar_lpdf(\n", " rcar{p} | {car_args}\n", " );\n" ) } else if (acef_car$type %in% "icar") { # intrinsic car based on the case study of Mitzi Morris # http://mc-stan.org/users/documentation/case-studies/icar_stan.html str_add(out$par) <- glue( " // parameters for the ICAR structure\n", " vector[Nloc{p}] zcar{p};\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " // scaled parameters for the ICAR structure\n", " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // compute scaled parameters for the ICAR structure\n", " rcar{p} = zcar{p} * sdcar{p};\n" ) str_add(out$prior) <- glue( " // improper prior on the spatial CAR component\n", " target += -0.5 * dot_self(zcar{p}[edges1{p}] - zcar{p}[edges2{p}]);\n", " // soft sum-to-zero constraint\n", " target += normal_{lpdf}(sum(zcar{p}) | 0, 0.001 * Nloc{p});\n" ) } else if (acef_car$type == "bym2") { # BYM2 car based on the case study of Mitzi Morris # http://mc-stan.org/users/documentation/case-studies/icar_stan.html str_add(out$data) <- glue( " // scaling factor of the spatial CAR component\n", " real car_scale{p};\n" ) str_add(out$par) <- glue( " // parameters for the BYM2 structure\n", " vector[Nloc{p}] zcar{p}; // spatial part\n", " vector[Nloc{p}] nszcar{p}; // non-spatial part\n", " // proportion of variance in the spatial part\n" ) str_add_list(out) <- stan_prior( prior, class = "rhocar", px = px, suffix = p, type = "real", normalize = normalize ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " 
// scaled parameters for the BYM2 structure\n", " vector[Nloc{p}] rcar{p};\n" ) str_add(out$tpar_comp) <- glue( " // join the spatial and the non-spatial CAR component\n", " rcar{p} = (sqrt(1 - rhocar{p}) * nszcar{p}", " + sqrt(rhocar{p} * inv(car_scale{p})) * zcar{p}) * sdcar{p};\n" ) str_add(out$prior) <- glue( " // improper prior on the spatial BYM2 component\n", " target += -0.5 * dot_self(zcar{p}[edges1{p}] - zcar{p}[edges2{p}]);\n", " // soft sum-to-zero constraint\n", " target += normal_{lpdf}(sum(zcar{p}) | 0, 0.001 * Nloc{p});\n", " // proper prior on the non-spatial BYM2 component\n", " target += std_normal_{lpdf}(nszcar{p});\n" ) } } acef_fcor <- subset2(acef, class = "fcor") if (NROW(acef_fcor)) { if (!has_natural_residuals) { stop2("FCOR terms are not implemented for this family.") } if (use_threading(threads)) { stop2("Threading is not supported for FCOR models.") } str_add(out$data) <- glue( " matrix[N{resp}, N{resp}] Mfcor{p}; // known residual covariance matrix\n" ) str_add(out$tdata_def) <- glue( " matrix[N{resp}, N{resp}] Lfcor{p} = cholesky_decompose(Mfcor{p});\n" ) } out } # stan code for offsets stan_offset <- function(bterms, threads, ...) { out <- list() if (is.formula(bterms$offset)) { p <- usc(combine_prefix(bterms)) resp <- usc(bterms$resp) slice <- stan_slice(threads) # use 'offsets' as 'offset' will be reserved in stanc3 str_add(out$data) <- glue( " vector[N{resp}] offsets{p};\n") str_add(out$pll_args) <- glue(", data vector offsets{p}") str_add(out$eta) <- glue(" + offsets{p}{slice}") } out } # Stan code for non-linear predictor terms # @param nlpars names of the non-linear parameters # @param ilink character vector of length 2 defining the link to be applied stan_nl <- function(bterms, data, nlpars, threads, ilink = rep("", 2), ...) { stopifnot(length(ilink) == 2L) out <- list() resp <- usc(bterms$resp) par <- combine_prefix(bterms, keep_mu = TRUE, nlp = TRUE) # prepare non-linear model n <- paste0(str_if(bterms$loop, "[n]"), " ") new_nlpars <- glue(" nlp{resp}_{nlpars}{n}") # covariates in the non-linear model covars <- all.vars(bterms$covars) new_covars <- NULL if (length(covars)) { p <- usc(combine_prefix(bterms)) new_covars <- rep(NA, length(covars)) data_cnl <- data_cnl(bterms, data) if (bterms$loop) { slice <- stan_nn(threads) } else { slice <- stan_slice(threads) } slice <- paste0(slice, " ") str_add(out$data) <- " // covariate vectors for non-linear functions\n" for (i in seq_along(covars)) { is_integer <- is.integer(data_cnl[[glue("C{p}_{i}")]]) if (is_integer) { str_add(out$data) <- glue( " int C{p}_{i}[N{resp}];\n" ) str_add(out$pll_args) <- glue(", data int[] C{p}_{i}") } else { str_add(out$data) <- glue( " vector[N{resp}] C{p}_{i};\n" ) str_add(out$pll_args) <- glue(", data vector C{p}_{i}") } new_covars[i] <- glue(" C{p}_{i}{slice}") } } # add white spaces to be able to replace parameters and covariates syms <- c( "+", "-", "*", "/", "%", "^", ".*", "./", "'", ")", "(", ",", "==", "!=", "<=", ">=", "<", ">", "!", "&&", "||" ) regex <- glue("(? 
Nme_{i}; // number of latent values\n", " int Jme_{i}[N]; // group index per observation\n" ) str_add(out$pll_args) <- glue(", data int[] Jme_{i}") } else { Nme <- "N" } str_add(out$data) <- glue( " int Mme_{i}; // number of groups\n" ) str_add(out$data) <- cglue( " vector[{Nme}] Xn_{K}; // noisy values\n", " vector[{Nme}] noise_{K}; // measurement noise\n" ) str_add_list(out) <- stan_prior( prior, "meanme", coef = coefs[K], suffix = usc(i), type = glue("vector[Mme_{i}]"), comment = "latent means", normalize = normalize ) str_add_list(out) <- stan_prior( prior, "sdme", coef = coefs[K], suffix = usc(i), type = glue("vector[Mme_{i}]"), coef_type = "real", comment = "latent SDs", normalize = normalize ) str_add(out$prior) <- cglue( " target += normal_{lpdf}(Xn_{K} | Xme_{K}, noise_{K});\n" ) if (meef$cor[K[1]] && length(K) > 1L) { str_add(out$data) <- glue( " int NCme_{i}; // number of latent correlations\n" ) str_add(out$par) <- glue( " matrix[Mme_{i}, {Nme}] zme_{i}; // standardized latent values\n" ) str_add_list(out) <- stan_prior( prior, "Lme", group = g, suffix = usc(i), type = glue("cholesky_factor_corr[Mme_{i}]"), comment = "cholesky factor of the latent correlation matrix", normalize = normalize ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- glue( " matrix[{Nme}, Mme_{i}] Xme{i}; // actual latent values\n" ) str_add(out$tpar_comp) <- glue( " // compute actual latent values\n", " Xme{i} = rep_matrix(transpose(meanme_{i}), {Nme})", " + transpose(diag_pre_multiply(sdme_{i}, Lme_{i}) * zme_{i});\n" ) str_add(out$tpar_def) <- cglue( " // using separate vectors increases efficiency\n", " vector[{Nme}] Xme_{K};\n" ) str_add(out$tpar_comp) <- cglue( " Xme_{K} = Xme{i}[, {J}];\n" ) str_add(out$pll_args) <- cglue(", vector Xme_{K}") str_add(out$prior) <- glue( " target += std_normal_{lpdf}(to_vector(zme_{i}));\n" ) str_add(out$gen_def) <- cglue( " // obtain latent correlation matrix\n", " corr_matrix[Mme_{i}] Corme_{i}", " = multiply_lower_tri_self_transpose(Lme_{i});\n", " vector[NCme_{i}] corme_{i};\n" ) str_add(out$gen_comp) <- stan_cor_gen_comp( cor = glue("corme_{i}"), ncol = glue("Mme_{i}") ) } else { str_add(out$par) <- cglue( " vector[{Nme}] zme_{K}; // standardized latent values\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- cglue( " vector[{Nme}] Xme_{K}; // actual latent values\n" ) str_add(out$tpar_comp) <- cglue( " // compute actual latent values\n", " Xme_{K} = meanme_{i}[{J}] + sdme_{i}[{J}] * zme_{K};\n" ) str_add(out$pll_args) <- cglue(", vector Xme_{K}") str_add(out$prior) <- cglue( " target += std_normal_{lpdf}(zme_{K});\n" ) } } out } # initialize and compute a linear predictor term in Stan language # @param out list of character strings containing Stan code # @param bterms btl object # @param ranef output of tidy_ranef # @param primitive use Stan's GLM likelihood primitives? # @param ilink character vector of length 2 defining the link to be applied # @param ... currently unused # @return list of character strings containing Stan code stan_eta_combine <- function(out, bterms, ranef, threads, primitive, ilink = c("", ""), ...) 
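# A minimal standalone sketch of the string assembly performed in the function
# below: individual predictor terms are collected as fragments such as
# " + Xc * b", the leading " +" is stripped, and the result is embedded in the
# Stan declaration of the linear predictor. Only the 'glue' package is used;
# all object names are illustrative, not brms internals.
library(glue)
eta_terms <- c(" + Xc * b", " + offsets", " + err")
eta <- paste0(eta_terms, collapse = "")
eta <- sub("^[ \t\r\n]+\\+", "", eta, perl = TRUE)  # drop the leading " +"
cat(glue("vector[N] mu ={eta};"))
#> vector[N] mu = Xc * b + offsets + err;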
{ stopifnot(is.list(out), is.btl(bterms), length(ilink) == 2L) if (primitive && !has_special_terms(bterms)) { # only overall effects and perhaps an intercept are present # which will be evaluated directly in the GLM primitive likelihood return(out) } px <- check_prefix(bterms) resp <- usc(bterms$resp) eta <- combine_prefix(px, keep_mu = TRUE, nlp = TRUE) out$eta <- sub("^[ \t\r\n]+\\+", "", out$eta, perl = TRUE) str_add(out$model_def) <- glue( " // initialize linear predictor term\n", " vector[N{resp}] {eta} ={out$eta};\n" ) out$eta <- NULL str_add(out$loopeta) <- stan_eta_re(ranef, threads = threads, px = px) if (nzchar(out$loopeta)) { # parts of eta are computed in a loop over observations out$loopeta <- sub("^[ \t\r\n]+\\+", "", out$loopeta, perl = TRUE) str_add(out$model_comp_eta_loop) <- glue( " for (n in 1:N{resp}) {{\n", " // add more terms to the linear predictor\n", stan_nn_def(threads), " {eta}[n] +={out$loopeta};\n", " }}\n" ) } out$loopeta <- NULL # possibly transform eta before it is passed to the likelihood if (sum(nzchar(ilink))) { # make sure mu comes last as it might depend on other parameters is_mu <- isTRUE("mu" %in% dpar_class(bterms[["dpar"]])) position <- str_if(is_mu, "model_comp_mu_link", "model_comp_dpar_link") str_add(out[[position]]) <- glue( " for (n in 1:N{resp}) {{\n", " // apply the inverse link function\n", " {eta}[n] = {ilink[1]}{eta}[n]{ilink[2]};\n", " }}\n" ) } out } # define Stan code to compute the fixef part of eta # @param fixef names of the population-level effects # @param bterms object of class 'btl' # @param primitive use Stan's GLM likelihood primitives? # @return a single character string stan_eta_fe <- function(fixef, bterms, threads, primitive) { if (length(fixef) && !primitive) { p <- usc(combine_prefix(bterms)) center_X <- stan_center_X(bterms) decomp <- get_decomp(bterms$fe) sparse <- is_sparse(bterms$fe) if (sparse) { stopifnot(!center_X && decomp == "none") csr_args <- sargs( paste0(c("rows", "cols"), "(X", p, ")"), paste0(c("wX", "vX", "uX", "b"), p) ) eta_fe <- glue("csr_matrix_times_vector({csr_args})") } else { sfx_X <- sfx_b <- "" if (decomp == "QR") { sfx_X <- sfx_b <- "Q" } else if (center_X) { sfx_X <- "c" } slice <- stan_slice(threads) eta_fe <- glue("X{sfx_X}{p}{slice} * b{sfx_b}{p}") } } else { resp <- usc(bterms$resp) eta_fe <- glue("rep_vector(0.0, N{resp})") } glue(" + {eta_fe}") } # write the group-level part of the linear predictor # @return a single character string stan_eta_re <- function(ranef, threads, px = list()) { eta_re <- "" n <- stan_nn(threads) ranef <- subset2(ranef, type = c("", "mmc"), ls = px) for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) rpx <- check_prefix(r) idp <- paste0(r$id, usc(combine_prefix(rpx))) idresp <- paste0(r$id, usc(rpx$resp)) if (r$gtype[1] == "mm") { ng <- seq_along(r$gcall[[1]]$groups) for (i in seq_rows(r)) { str_add(eta_re) <- cglue( " + W_{idresp[i]}_{ng}{n}", " * r_{idp[i]}_{r$cn[i]}[J_{idresp[i]}_{ng}{n}]", " * Z_{idp[i]}_{r$cn[i]}_{ng}{n}" ) } } else { str_add(eta_re) <- cglue( " + r_{idp}_{r$cn}[J_{idresp}{n}] * Z_{idp}_{r$cn}{n}" ) } } eta_re } # Stan code for group-level parameters in special predictor terms # @param r data.frame created by tidy_ranef # @return a character vector: one element per row of 'r' stan_eta_rsp <- function(r) { stopifnot(nrow(r) > 0L, length(unique(r$gtype)) == 1L) rpx <- check_prefix(r) idp <- paste0(r$id, usc(combine_prefix(rpx))) idresp <- paste0(r$id, usc(rpx$resp)) if (r$gtype[1] == "mm") { ng <- seq_along(r$gcall[[1]]$groups) out <- 
rep("", nrow(r)) for (i in seq_along(out)) { out[i] <- glue( "W_{idresp[i]}_{ng}[n] * r_{idp[i]}_{r$cn[i]}[J_{idresp[i]}_{ng}[n]]", collapse = " + " ) } } else { out <- glue("r_{idp}_{r$cn}[J_{idresp}[n]]") } out } # does eta need to be transformed manually using the link functions # @param family the model family # @param cens_or_trunc is the model censored or truncated? stan_eta_transform <- function(family, bterms) { transeta <- "transeta" %in% family_info(family, "specials") no_transform <- family$link == "identity" && !transeta || has_joint_link(family) && !is.customfamily(family) !no_transform && !stan_has_built_in_fun(family, bterms) } # correctly apply inverse link to eta # @param dpar name of the parameter for which to define the link # @param bterms object of class 'brmsterms' # @param resp name of the response variable # @return a single character string stan_eta_ilink <- function(dpar, bterms, resp = "") { stopifnot(is.brmsterms(bterms)) out <- rep("", 2) family <- bterms$dpars[[dpar]]$family if (stan_eta_transform(family, bterms)) { dpar_id <- dpar_id(dpar) pred_dpars <- names(bterms$dpars) shape <- glue("shape{dpar_id}") n_shape <- str_if(shape %in% pred_dpars, "[n]") shape <- glue("{shape}{resp}{n_shape}") nu <- glue("nu{dpar_id}") n_nu <- str_if(nu %in% pred_dpars, "[n]") nu <- glue("{nu}{resp}{n_nu}") family_link <- str_if( family$family %in% c("gamma", "hurdle_gamma", "exponential"), paste0(family$family, "_", family$link), family$family ) ilink <- stan_ilink(family$link) out <- switch(family_link, c(glue("{ilink}("), ")"), gamma_log = c(glue("{shape} * exp(-("), "))"), gamma_inverse = c(glue("{shape} * ("), ")"), gamma_identity = c(glue("{shape} / ("), ")"), hurdle_gamma_log = c(glue("{shape} * exp(-("), "))"), hurdle_gamma_inverse = c(glue("{shape} * ("), ")"), hurdle_gamma_identity = c(glue("{shape} / ("), ")"), exponential_log = c("exp(-(", "))"), exponential_inverse = c("(", ")"), exponential_identity = c("inv(", ")"), weibull = c(glue("{ilink}("), glue(") / tgamma(1 + 1 / {shape})")), frechet = c(glue("{ilink}("), glue(") / tgamma(1 - 1 / {nu})")) ) } out } # indicate if the population-level design matrix should be centered # implies a temporary shift in the intercept of the model stan_center_X <- function(x) { is.btl(x) && !no_center(x$fe) && has_intercept(x$fe) && !fix_intercepts(x) && !is_sparse(x$fe) && !has_sum_to_zero_thres(x) } # indicate if the overall coefficients 'b' should be # assigned in the transformed parameters block stan_assign_b_tpar <- function(bterms, prior) { special <- get_special_prior(prior, bterms) !is.null(special$horseshoe) || !is.null(special$R2D2) } # default Stan definitions for distributional parameters # @param dpar name of a distributional parameter # @param suffix optional suffix of the parameter name # @param family optional brmsfamily object # @param fixed should the parameter be fixed to a certain value? 
stan_dpar_types <- function(dpar, suffix = "", family = NULL, fixed = FALSE) { dpar <- as_one_character(dpar) suffix <- as_one_character(suffix) fixed <- as_one_logical(fixed) if (is.mixfamily(family)) { if (dpar_class(dpar) == "theta") { return("") # theta is handled in stan_mixture } family <- family$mix[[as.numeric(dpar_id(dpar))]] } if (is.customfamily(family)) { dpar_class <- dpar_class(dpar) lb <- family$lb[[dpar_class]] ub <- family$ub[[dpar_class]] lb <- if (!is.na(lb)) glue("lower={lb}") ub <- if (!is.na(ub)) glue("upper={ub}") bounds <- paste0(c(lb, ub), collapse = ",") if (nzchar(bounds)) bounds <- glue("<{bounds}>") return(glue("real{bounds}")) } if (fixed) { min_Y <- glue("min(Y{suffix})") } else { min_Y <- glue("min_Y{suffix}") } default_types <- list( sigma = list( type = "real", comment = "dispersion parameter" ), shape = list( type = "real", comment = "shape parameter" ), nu = list( type = "real", comment = "degrees of freedom or shape" ), phi = list( type = "real", comment = "precision parameter" ), kappa = list( type = "real", comment = "precision parameter" ), beta = list( type = "real", comment = "scale parameter" ), zi = list( type = "real", comment = "zero-inflation probability" ), hu = list( type = "real", comment = "hurdle probability" ), zoi = list( type = "real", comment = "zero-one-inflation probability" ), coi = list( type = "real", comment = "conditional one-inflation probability" ), bs = list( type = "real", comment = "boundary separation parameter" ), ndt = list( type = glue("real"), comment = "non-decision time parameter" ), bias = list( type = "real", comment = "initial bias parameter" ), disc = list( type = "real", comment = "discrimination parameters" ), quantile = list( type = "real", comment = "quantile parameter" ), xi = list( type = "real", comment = "shape parameter" ), alpha = list( type = "real", comment = "skewness parameter" ) ) out <- "" types <- default_types[[dpar_class(dpar)]] if (!is.null(types)) { out <- types$type attr(out, "comment") <- types$comment } out } # default Stan definitions for temporary distributional parameters stan_dpar_tmp_types <- function(dpar, suffix = "", family = NULL) { dpar <- as_one_character(dpar) suffix <- as_one_character(suffix) if (is.mixfamily(family)) { family <- family$mix[[as.numeric(dpar_id(dpar))]] } if (is.customfamily(family)) { return("") # no temporary parameters in custom families } default_types <- list( xi = list( type = "real", comment = "unscaled shape parameter" ) ) out <- "" types <- default_types[[dpar_class(dpar)]] if (!is.null(types)) { out <- types$type attr(out, "comment") <- types$comment } out } # Stan code for transformations of distributional parameters stan_dpar_transform <- function(bterms, threads, ...) 
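# Numerical sketch of the centered-to-direct parameterization transform that
# the function below generates Stan code for in the skew_normal case:
# delta = alpha / sqrt(1 + alpha^2), omega = sigma / sqrt(1 - (2/pi) * delta^2),
# xi = mu - omega * delta * sqrt(2/pi). Draws generated from (xi, omega, alpha)
# should then have mean close to mu and standard deviation close to sigma.
# All values below are illustrative.
set.seed(1)
mu <- 2; sigma <- 1.5; alpha <- 4
delta <- alpha / sqrt(1 + alpha^2)
omega <- sigma / sqrt(1 - 2 / pi * delta^2)
xi <- mu - omega * delta * sqrt(2 / pi)
# stochastic representation of the skew-normal distribution
n <- 1e5
u1 <- abs(rnorm(n)); u2 <- rnorm(n)
x <- xi + omega * (delta * u1 + sqrt(1 - delta^2) * u2)
round(c(mean = mean(x), sd = sd(x)), 2)  # approximately c(mean = 2, sd = 1.5)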
{ stopifnot(is.brmsterms(bterms)) out <- list() families <- family_names(bterms) p <- usc(combine_prefix(bterms)) resp <- usc(bterms$resp) if (any(conv_cats_dpars(families))) { str_add(out$model_def) <- glue( " // linear predictor matrix\n", " vector[ncat{p}] mu{p}[N{resp}];\n" ) mu_dpars <- make_stan_names(glue("mu{bterms$family$cats}")) mu_dpars <- glue("{mu_dpars}{p}[n]") iref <- match(bterms$family$refcat, bterms$family$cats) mu_dpars[iref] <- "0" str_add(out$model_comp_catjoin) <- glue( " for (n in 1:N{resp}) {{\n", " mu{p}[n] = {stan_vector(mu_dpars)};\n", " }}\n" ) } if (any(families %in% "skew_normal")) { # as suggested by Stephen Martin use sigma and mu of CP # but the skewness parameter alpha of DP dp_names <- names(bterms$dpars) for (i in which(families %in% "skew_normal")) { id <- str_if(length(families) == 1L, "", i) sigma <- stan_sigma_transform(bterms, id = id, threads = threads) ns <- str_if(grepl(stan_nn_regex(), sigma), "[n]") na <- str_if(glue("alpha{id}") %in% dp_names, "[n]") type_delta <- str_if(nzchar(na), glue("vector[N{resp}]"), "real") no <- str_if(any(nzchar(c(ns, na))), "[n]", "") type_omega <- str_if(nzchar(no), glue("vector[N{resp}]"), "real") str_add(out$model_def) <- glue( " // parameters used to transform the skew-normal distribution\n", " {type_delta} delta{id}{p}; // transformed alpha parameter\n", " {type_omega} omega{id}{p}; // scale parameter\n" ) alpha <- glue("alpha{id}{p}{na}") delta <- glue("delta{id}{p}{na}") omega <- glue("omega{id}{p}{no}") comp_delta <- glue( " {delta} = {alpha} / sqrt(1 + {alpha}^2);\n" ) comp_omega <- glue( " {omega} = {sigma} / sqrt(1 - sqrt(2 / pi())^2 * {delta}^2);\n" ) str_add(out$model_comp_dpar_trans) <- glue( " // use efficient skew-normal parameterization\n", str_if(!nzchar(na), comp_delta), str_if(!nzchar(no), comp_omega), " for (n in 1:N{resp}) {{\n", stan_nn_def(threads), str_if(nzchar(na), glue(" ", comp_delta)), str_if(nzchar(no), glue(" ", comp_omega)), " mu{id}{p}[n] = mu{id}{p}[n]", " - {omega} * {delta} * sqrt(2 / pi());\n", " }}\n" ) } } if (any(families %in% "gen_extreme_value")) { dp_names <- c(names(bterms$dpars), names(bterms$fdpars)) for (i in which(families %in% "gen_extreme_value")) { id <- str_if(length(families) == 1L, "", i) xi <- glue("xi{id}") if (!xi %in% dp_names) { str_add(out$model_def) <- glue( " real {xi}; // scaled shape parameter\n" ) sigma <- glue("sigma{id}") sfx <- str_if(sigma %in% names(bterms$dpars), "_vector") args <- sargs( glue("tmp_{xi}"), glue("Y{p}"), glue("mu{id}{p}"), glue("{sigma}{p}") ) str_add(out$model_comp_dpar_trans) <- glue( " {xi}{p} = scale_xi{sfx}({args});\n" ) } } } out } # Stan code for sigma to incorporate addition argument 'se' stan_sigma_transform <- function(bterms, id = "", threads = NULL) { if (nzchar(id)) { # find the right family in mixture models family <- family_names(bterms)[as.integer(id)] } else { family <- bterms$family$family stopifnot(!isTRUE(family == "mixture")) } p <- usc(combine_prefix(bterms)) ns <- str_if(glue("sigma{id}") %in% names(bterms$dpars), "[n]") has_sigma <- has_sigma(family) && !no_sigma(bterms) sigma <- str_if(has_sigma, glue("sigma{id}{p}{ns}")) if (is.formula(bterms$adforms$se)) { nse <- stan_nn(threads) sigma <- str_if(nzchar(sigma), glue("sqrt(square({sigma}) + se2{p}{nse})"), glue("se{p}{nse}") ) } sigma } brms/R/prepare_predictions.R0000644000175000017500000013422614111751666015754 0ustar nileshnilesh#' @export #' @rdname prepare_predictions prepare_predictions.brmsfit <- function( x, newdata = NULL, re_formula = NULL, 
allow_new_levels = FALSE, sample_new_levels = "uncertainty", incl_autocor = TRUE, oos = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, nug = NULL, smooths_only = FALSE, offset = TRUE, newdata2 = NULL, new_objects = NULL, point_estimate = NULL, ... ) { x <- restructure(x) # allows functions to fall back to old default behavior # which was used when originally fitting the model options(.brmsfit_version = x$version$brms) on.exit(options(.brmsfit_version = NULL)) snl_options <- c("uncertainty", "gaussian", "old_levels") sample_new_levels <- match.arg(sample_new_levels, snl_options) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) warn_brmsfit_multiple(x, newdata = newdata) newdata2 <- use_alias(newdata2, new_objects) x <- exclude_terms( x, incl_autocor = incl_autocor, offset = offset, smooths_only = smooths_only ) resp <- validate_resp(resp, x) draw_ids <- validate_draw_ids(x, draw_ids, ndraws) draws <- as_draws_matrix(x) draws <- suppressMessages(subset_draws(draws, draw = draw_ids)) draws <- point_draws(draws, point_estimate) new_formula <- update_re_terms(x$formula, re_formula) bterms <- brmsterms(new_formula) ranef <- tidy_ranef(bterms, x$data) meef <- tidy_meef(bterms, x$data) new <- !is.null(newdata) sdata <- standata( x, newdata = newdata, re_formula = re_formula, newdata2 = newdata2, resp = resp, allow_new_levels = allow_new_levels, internal = TRUE, ... ) prep_ranef <- prepare_predictions_ranef( ranef = ranef, draws = draws, sdata = sdata, resp = resp, old_ranef = x$ranef, sample_new_levels = sample_new_levels, ) prepare_predictions( bterms, draws = draws, sdata = sdata, data = x$data, prep_ranef = prep_ranef, meef = meef, resp = resp, sample_new_levels = sample_new_levels, nug = nug, new = new, oos = oos, stanvars = x$stanvars ) } prepare_predictions.mvbrmsterms <- function(x, draws, sdata, resp = NULL, ...) { resp <- validate_resp(resp, x$responses) if (length(resp) > 1) { if (has_subset(x)) { stop2("Argument 'resp' must be a single variable name ", "for models using addition argument 'draw_ids'.") } out <- list(ndraws = nrow(draws), nobs = sdata$N) out$resps <- named_list(resp) out$old_order <- attr(sdata, "old_order") for (r in resp) { out$resps[[r]] <- prepare_predictions( x$terms[[r]], draws = draws, sdata = sdata, ... ) } if (x$rescor) { out$family <- out$resps[[1]]$family out$family$fun <- paste0(out$family$family, "_mv") rescor <- get_cornames(resp, type = "rescor", brackets = FALSE) out$mvpars$rescor <- prepare_draws(draws, rescor) if (out$family$family == "student") { # store in out$dpars so that get_dpar can be called on nu out$dpars$nu <- as.vector(prepare_draws(draws, "nu")) } out$data$N <- out$resps[[1]]$data$N out$data$weights <- out$resps[[1]]$data$weights Y <- lapply(out$resps, function(x) x$data$Y) out$data$Y <- do_call(cbind, Y) } out <- structure(out, class = "mvbrmsprep") } else { out <- prepare_predictions( x$terms[[resp]], draws = draws, sdata = sdata, ... ) } out } #' @export prepare_predictions.brmsterms <- function(x, draws, sdata, data, ...) 
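# A hedged sketch of the draws-by-observations broadcasting relied on below
# (the internal data2draws helper is assumed to behave roughly like this):
# a constant or a length-nobs vector is recycled row-wise into an
# ndraws x nobs matrix so that data can be combined elementwise with
# posterior draws. The function name is illustrative.
data2draws_sketch <- function(x, dim) {
  matrix(x, nrow = dim[1], ncol = dim[2], byrow = TRUE)
}
dim(data2draws_sketch(0, c(4000, 10)))  # 4000 draws x 10 observations
data2draws_sketch(c(1, 2, 3), c(2, 3))  # each of the 2 rows equals c(1, 2, 3)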
{ data <- subset_data(data, x) ndraws <- nrow(draws) nobs <- sdata[[paste0("N", usc(x$resp))]] resp <- usc(combine_prefix(x)) out <- nlist(ndraws, nobs, resp = x$resp) out$family <- prepare_family(x) out$old_order <- attr(sdata, "old_order") valid_dpars <- valid_dpars(x) out$dpars <- named_list(valid_dpars) for (dp in valid_dpars) { dp_regex <- paste0("^", dp, resp, "$") if (is.btl(x$dpars[[dp]]) || is.btnl(x$dpars[[dp]])) { out$dpars[[dp]] <- prepare_predictions( x$dpars[[dp]], draws = draws, sdata = sdata, data = data, ... ) } else if (any(grepl(dp_regex, colnames(draws)))) { out$dpars[[dp]] <- as.vector(prepare_draws(draws, dp_regex, regex = TRUE)) } else if (is.numeric(x$fdpars[[dp]]$value)) { # fixed dpars are stored as regular draws as of brms 2.12.9 # so this manual extraction is only required for older models out$dpars[[dp]] <- x$fdpars[[dp]]$value } } out$nlpars <- named_list(names(x$nlpars)) for (nlp in names(x$nlpars)) { out$nlpars[[nlp]] <- prepare_predictions( x$nlpars[[nlp]], draws = draws, sdata = sdata, data = data, ... ) } if (is.mixfamily(x$family)) { families <- family_names(x$family) thetas <- paste0("theta", seq_along(families)) if (any(ulapply(out$dpars[thetas], is.list))) { # theta was predicted missing_id <- which(ulapply(out$dpars[thetas], is.null)) out$dpars[[paste0("theta", missing_id)]] <- structure( data2draws(0, c(ndraws, nobs)), predicted = TRUE ) } else { # theta was not predicted out$dpars$theta <- do_call(cbind, out$dpars[thetas]) out$dpars[thetas] <- NULL if (nrow(out$dpars$theta) == 1L) { dim <- c(nrow(draws), ncol(out$dpars$theta)) out$dpars$theta <- data2draws(out$dpars$theta, dim = dim) } } } if (is_ordinal(x$family)) { # it is better to handle ordinal thresholds outside the # main predictor term in particular for use in custom families if (is.mixfamily(x$family)) { mu_pars <- str_subset(names(x$dpars), "^mu[[:digit:]]+") for (mu in mu_pars) { out$thres[[mu]] <- prepare_predictions_thres(x$dpars[[mu]], draws, sdata, ...) } } else { out$thres <- prepare_predictions_thres(x$dpars$mu, draws, sdata, ...) } } if (is_cox(x$family)) { # prepare baseline hazard functions for the Cox model if (is.mixfamily(x$family)) { mu_pars <- str_subset(names(x$dpars), "^mu[[:digit:]]+") for (mu in mu_pars) { out$bhaz[[mu]] <- prepare_predictions_bhaz( x$dpars[[mu]], draws, sdata, ... ) } } else { out$bhaz <- prepare_predictions_bhaz(x$dpars$mu, draws, sdata, ...) } } # response category names for categorical and ordinal models out$cats <- get_cats(x) # only include those autocor draws on the top-level # of the output which imply covariance matrices on natural residuals out$ac <- prepare_predictions_ac(x$dpars$mu, draws, sdata, nat_cov = TRUE, ...) out$data <- prepare_predictions_data(x, sdata = sdata, data = data, ...) structure(out, class = "brmsprep") } #' @export prepare_predictions.btnl <- function(x, draws, sdata, ...) { out <- list( family = x$family, nlform = x$formula[[2]], ndraws = nrow(draws), nobs = sdata[[paste0("N", usc(x$resp))]], used_nlpars = x$used_nlpars, loop = x$loop ) class(out) <- "bprepnl" p <- usc(combine_prefix(x)) covars <- all.vars(x$covars) dim <- c(out$ndraws, out$nobs) for (i in seq_along(covars)) { cvalues <- sdata[[paste0("C", p, "_", i)]] out$C[[covars[i]]] <- data2draws(cvalues, dim = dim) } out } #' @export prepare_predictions.btl <- function(x, draws, sdata, ...) 
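# A self-contained sketch of the fixed-effects piece prepared below: draws of
# the coefficients (columns named "b_<coef>") times the design matrix give a
# draws-by-observations matrix of linear predictor values. Only the
# 'posterior' package is used; variable and column names are illustrative.
library(posterior)
set.seed(2)
draws <- as_draws_matrix(
  matrix(rnorm(200), ncol = 2, dimnames = list(NULL, c("b_Intercept", "b_x")))
)
X <- cbind(Intercept = 1, x = seq(-1, 1, length.out = 5))  # 5 observations
b <- subset_draws(draws, variable = c("b_Intercept", "b_x"))
eta <- unclass(b) %*% t(X)
dim(eta)  # 100 draws x 5 observations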
{ ndraws <- nrow(draws) nobs <- sdata[[paste0("N", usc(x$resp))]] out <- nlist(family = x$family, ndraws, nobs) class(out) <- "bprepl" out$fe <- prepare_predictions_fe(x, draws, sdata, ...) out$sp <- prepare_predictions_sp(x, draws, sdata, ...) out$cs <- prepare_predictions_cs(x, draws, sdata, ...) out$sm <- prepare_predictions_sm(x, draws, sdata, ...) out$gp <- prepare_predictions_gp(x, draws, sdata, ...) out$re <- prepare_predictions_re(x, sdata, ...) out$ac <- prepare_predictions_ac(x, draws, sdata, nat_cov = FALSE, ...) out$offset <- prepare_predictions_offset(x, sdata, ...) out } # prepare predictions of ordinary population-level effects prepare_predictions_fe <- function(bterms, draws, sdata, ...) { out <- list() if (is.null(bterms[["fe"]])) { return(out) } p <- usc(combine_prefix(bterms)) X <- sdata[[paste0("X", p)]] fixef <- colnames(X) if (length(fixef)) { out$X <- X b_pars <- paste0("b", p, "_", fixef) out$b <- prepare_draws(draws, b_pars) } out } # prepare predictions of special effects terms prepare_predictions_sp <- function(bterms, draws, sdata, data, meef = empty_meef(), new = FALSE, ...) { out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) { return(out) } p <- usc(combine_prefix(bterms)) resp <- usc(bterms$resp) # prepare calls evaluated in sp_predictor out$calls <- vector("list", nrow(spef)) for (i in seq_along(out$calls)) { call <- spef$joint_call[[i]] if (!is.null(spef$calls_mo[[i]])) { new_mo <- paste0(".mo(simo_", spef$Imo[[i]], ", Xmo_", spef$Imo[[i]], ")") call <- rename(call, spef$calls_mo[[i]], new_mo) } if (!is.null(spef$calls_me[[i]])) { new_me <- paste0("Xme_", seq_along(meef$term)) call <- rename(call, meef$term, new_me) } if (!is.null(spef$calls_mi[[i]])) { is_na_idx <- is.na(spef$idx2_mi[[i]]) idx_mi <- paste0("idxl", p, "_", spef$vars_mi[[i]], "_", spef$idx2_mi[[i]]) idx_mi <- ifelse(is_na_idx, "", paste0("[, ", idx_mi, "]")) new_mi <- paste0("Yl_", spef$vars_mi[[i]], idx_mi) call <- rename(call, spef$calls_mi[[i]], new_mi) } if (spef$Ic[i] > 0) { str_add(call) <- paste0(" * Csp_", spef$Ic[i]) } out$calls[[i]] <- parse(text = paste0(call)) } # extract general data and parameters for special effects bsp_pars <- paste0("bsp", p, "_", spef$coef) out$bsp <- prepare_draws(draws, bsp_pars) colnames(out$bsp) <- spef$coef # prepare predictions specific to monotonic effects simo_coef <- get_simo_labels(spef) Jmo <- sdata[[paste0("Jmo", p)]] out$simo <- out$Xmo <- named_list(simo_coef) for (i in seq_along(simo_coef)) { J <- seq_len(Jmo[i]) simo_par <- paste0("simo", p, "_", simo_coef[i], "[", J, "]") out$simo[[i]] <- prepare_draws(draws, simo_par) out$Xmo[[i]] <- sdata[[paste0("Xmo", p, "_", i)]] } # prepare predictions specific to noise-free effects warn_me <- FALSE if (nrow(meef)) { save_mevars <- any(grepl("^Xme_", colnames(draws))) warn_me <- warn_me || !new && !save_mevars out$Xme <- named_list(meef$coef) Xme_regex <- paste0("^Xme_", escape_all(meef$coef), "\\[") Xn <- sdata[paste0("Xn_", seq_rows(meef))] noise <- sdata[paste0("noise_", seq_rows(meef))] groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) if (nzchar(g)) { Jme <- sdata[[paste0("Jme_", i)]] } if (!new && save_mevars) { # extract original draws of latent variables for (k in K) { out$Xme[[k]] <- prepare_draws(draws, Xme_regex[k], regex = TRUE) } } else { # sample new values of latent variables if (nzchar(g)) { # TODO: reuse existing levels in predictions? 
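# A standalone sketch of the sampling step below: when stored draws of the
# latent values are unavailable, new latent values of a noisy covariate are
# simulated as Normal(observed value, known measurement noise), independently
# for each posterior draw. All names and numbers are illustrative.
set.seed(3)
ndraws <- 4
xn <- c(1.0, 2.5)     # observed noisy values
noise <- c(0.2, 0.1)  # known measurement error SDs
me_dim <- c(ndraws, length(xn))
dXn <- matrix(xn, me_dim[1], me_dim[2], byrow = TRUE)
dnoise <- matrix(noise, me_dim[1], me_dim[2], byrow = TRUE)
Xme <- array(rnorm(prod(me_dim), dXn, dnoise), me_dim)
Xme  # 4 draws x 2 latent covariate values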
# represent all indices between 1 and length(unique(Jme)) Jme <- as.numeric(factor(Jme)) me_dim <- c(nrow(out$bsp), max(Jme)) } else { me_dim <- c(nrow(out$bsp), sdata$N) } for (k in K) { dXn <- data2draws(Xn[[k]], me_dim) dnoise <- data2draws(noise[[k]], me_dim) out$Xme[[k]] <- array(rnorm(prod(me_dim), dXn, dnoise), me_dim) remove(dXn, dnoise) } } if (nzchar(g)) { for (k in K) { out$Xme[[k]] <- out$Xme[[k]][, Jme, drop = FALSE] } } } } # prepare predictions specific to missing value variables dim <- c(nrow(out$bsp), sdata[[paste0("N", resp)]]) vars_mi <- unique(unlist(spef$vars_mi)) if (length(vars_mi)) { # we know at this point that the model is multivariate Yl_names <- paste0("Yl_", vars_mi) out$Yl <- named_list(Yl_names) for (i in seq_along(out$Yl)) { vmi <- vars_mi[i] dim_y <- c(nrow(out$bsp), sdata[[paste0("N_", vmi)]]) Y <- data2draws(sdata[[paste0("Y_", vmi)]], dim_y) sdy <- sdata[[paste0("noise_", vmi)]] if (is.null(sdy)) { # missings only out$Yl[[i]] <- Y if (!new) { Ymi_regex <- paste0("^Ymi_", escape_all(vmi), "\\[") Ymi <- prepare_draws(draws, Ymi_regex, regex = TRUE) Jmi <- sdata[[paste0("Jmi_", vmi)]] out$Yl[[i]][, Jmi] <- Ymi } } else { # measurement-error in the response save_mevars <- any(grepl("^Yl_", colnames(draws))) if (save_mevars && !new) { Ymi_regex <- paste0("^Yl_", escape_all(vmi), "\\[") out$Yl[[i]] <- prepare_draws(draws, Ymi_regex, regex = TRUE) } else { warn_me <- warn_me || !new sdy <- data2draws(sdy, dim) out$Yl[[i]] <- rcontinuous( n = prod(dim), dist = "norm", mean = Y, sd = sdy, lb = sdata[[paste0("lbmi_", vmi)]], ub = sdata[[paste0("ubmi_", vmi)]] ) out$Yl[[i]] <- array(out$Yl[[i]], dim_y) } } } # extract index variables belonging to mi terms uni_mi <- na.omit(attr(spef, "uni_mi")) idxl_vars <- paste0("idxl", p, "_", uni_mi$var, "_", uni_mi$idx2) out$idxl <- sdata[idxl_vars] } if (warn_me) { warning2( "Noise-free latent variables were not saved. ", "You can control saving those variables via 'save_pars()'. ", "Treating original data as if it was new data as a workaround." ) } # prepare covariates ncovars <- max(spef$Ic) out$Csp <- vector("list", ncovars) for (i in seq_len(ncovars)) { out$Csp[[i]] <- sdata[[paste0("Csp", p, "_", i)]] out$Csp[[i]] <- data2draws(out$Csp[[i]], dim = dim) } out } # prepare predictions of category specific effects prepare_predictions_cs <- function(bterms, draws, sdata, data, ...) { out <- list() if (!is_ordinal(bterms$family)) { return(out) } resp <- usc(bterms$resp) out$nthres <- sdata[[paste0("nthres", resp)]] csef <- colnames(get_model_matrix(bterms$cs, data)) if (length(csef)) { p <- usc(combine_prefix(bterms)) cs_pars <- paste0("^bcs", p, "_", escape_all(csef), "\\[") out$bcs <- prepare_draws(draws, cs_pars, regex = TRUE) out$Xcs <- sdata[[paste0("Xcs", p)]] } out } # prepare predictions of smooth terms prepare_predictions_sm <- function(bterms, draws, sdata, data, ...) 
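# A small sketch of the name matching used below: smooth coefficients are
# stored under names such as "s_sx_1[1]", "s_sx_1[2]", ... and are selected
# with a regular expression anchored at the start of the name. The parameter
# names here are illustrative.
pars <- c("b_Intercept", "bs_sx_1", "s_sx_1[1]", "s_sx_1[2]", "sigma")
pars[grepl("^s_sx_1\\[", pars)]  # "s_sx_1[1]" "s_sx_1[2]"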
{ out <- list() smef <- tidy_smef(bterms, data) if (!NROW(smef)) { return(out) } p <- usc(combine_prefix(bterms)) Xs_names <- attr(smef, "Xs_names") if (length(Xs_names)) { out$fe$Xs <- sdata[[paste0("Xs", p)]] # allow for "b_" prefix for compatibility with version <= 2.5.0 bspars <- paste0("^bs?", p, "_", escape_all(Xs_names), "$") out$fe$bs <- prepare_draws(draws, bspars, regex = TRUE) } out$re <- named_list(smef$label) for (i in seq_rows(smef)) { sm <- list() for (j in seq_len(smef$nbases[i])) { sm$Zs[[j]] <- sdata[[paste0("Zs", p, "_", i, "_", j)]] spars <- paste0("^s", p, "_", smef$label[i], "_", j, "\\[") sm$s[[j]] <- prepare_draws(draws, spars, regex = TRUE) } out$re[[i]] <- sm } out } # prepare predictions for Gaussian processes # @param new is new data used? # @param nug small numeric value to avoid numerical problems in GPs prepare_predictions_gp <- function(bterms, draws, sdata, data, new = FALSE, nug = NULL, ...) { gpef <- tidy_gpef(bterms, data) if (!nrow(gpef)) { return(list()) } p <- usc(combine_prefix(bterms)) if (is.null(nug)) { # nug for old data must be the same as in the Stan code as even tiny # differences (e.g., 1e-12 vs. 1e-11) will matter for larger lscales nug <- ifelse(new, 1e-8, 1e-12) } out <- named_list(gpef$label) for (i in seq_along(out)) { cons <- gpef$cons[[i]] if (length(cons)) { gp <- named_list(cons) for (j in seq_along(cons)) { gp[[j]] <- .prepare_predictions_gp( gpef, draws = draws, sdata = sdata, nug = nug, new = new, byj = j, p = p, i = i ) } attr(gp, "byfac") <- TRUE } else { gp <- .prepare_predictions_gp( gpef, draws = draws, sdata = sdata, nug = nug, new = new, p = p, i = i ) } out[[i]] <- gp } out } # prepare predictions for Gaussian processes # @param gpef output of tidy_gpef # @param p prefix created by combine_prefix() # @param i indiex of the Gaussian process # @param byj index for the contrast of a categorical 'by' variable # @return a list to be evaluated by .predictor_gp() .prepare_predictions_gp <- function(gpef, draws, sdata, nug, new, p, i, byj = NULL) { sfx1 <- escape_all(gpef$sfx1[[i]]) sfx2 <- escape_all(gpef$sfx2[[i]]) if (is.null(byj)) { lvl <- "" } else { lvl <- gpef$bylevels[[i]][byj] sfx1 <- sfx1[byj] sfx2 <- sfx2[byj, ] } j <- usc(byj) pi <- paste0(p, "_", i) gp <- list() sdgp <- paste0("^sdgp", p, "_", sfx1, "$") gp$sdgp <- as.vector(prepare_draws(draws, sdgp, regex = TRUE)) lscale <- paste0("^lscale", p, "_", sfx2, "$") gp$lscale <- prepare_draws(draws, lscale, regex = TRUE) zgp_regex <- paste0("^zgp", p, "_", sfx1, "\\[") gp$zgp <- prepare_draws(draws, zgp_regex, regex = TRUE) Xgp_name <- paste0("Xgp", pi, j) Igp_name <- paste0("Igp", pi, j) Jgp_name <- paste0("Jgp", pi, j) if (new && isNA(gpef$k[i])) { # in exact GPs old covariate values are required for predictions gp$x <- sdata[[paste0(Xgp_name, "_old")]] # nug for old data must be the same as in the Stan code as even tiny # differences (e.g., 1e-12 vs. 
1e-11) will matter for larger lscales gp$nug <- 1e-12 # computing GPs for new data requires the old GP terms gp$yL <- .predictor_gp(gp) gp$x_new <- sdata[[Xgp_name]] gp$Igp <- sdata[[Igp_name]] } else { gp$x <- sdata[[Xgp_name]] gp$Igp <- sdata[[Igp_name]] if (!isNA(gpef$k[i])) { gp$slambda <- sdata[[paste0("slambda", pi, j)]] } } gp$Jgp <- sdata[[Jgp_name]] # possible factor from 'by' variable gp$Cgp <- sdata[[paste0("Cgp", pi, j)]] gp$nug <- nug gp } # prepare predictions for all group level effects # needs to be separate from 'prepare_predictions_re' to take correlations # across responses and distributional parameters into account (#779) # @param ranef output of 'tidy_ranef' based on the new formula and old data # @param old_ranef same as 'ranef' but based on the original formula # @return a named list with one element per group containing posterior draws # of levels used in the data as well as additional meta-data prepare_predictions_ranef <- function(ranef, draws, sdata, old_ranef, resp = NULL, sample_new_levels = "uncertainty", ...) { if (!nrow(ranef)) { return(list()) } # ensures subsetting 'ranef' by 'resp' works correctly resp <- resp %||% "" groups <- unique(ranef$group) out <- named_list(groups, list()) for (g in groups) { # prepare general variables related to group g ranef_g <- subset2(ranef, group = g) old_ranef_g <- subset2(old_ranef, group = g) used_levels <- attr(sdata, "levels")[[g]] old_levels <- attr(old_ranef, "levels")[[g]] nlevels <- length(old_levels) nranef <- nrow(ranef_g) # prepare draws of group-level effects rpars <- paste0("^r_", g, "(__.+)?\\[") rdraws <- prepare_draws(draws, rpars, regex = TRUE) if (!length(rdraws)) { stop2( "Group-level coefficients of group '", g, "' not found. ", "You can control saving those coefficients via 'save_pars()'." 
) } # only prepare predictions of effects specified in the new formula cols_match <- c("coef", "resp", "dpar", "nlpar") used_rpars <- which(find_rows(old_ranef_g, ls = ranef_g[cols_match])) used_rpars <- outer(seq_len(nlevels), (used_rpars - 1) * nlevels, "+") used_rpars <- as.vector(used_rpars) rdraws <- rdraws[, used_rpars, drop = FALSE] rdraws <- column_to_row_major_order(rdraws, nranef) # prepare data required for indexing parameters gtype <- ranef_g$gtype[1] resp_g <- intersect(ranef_g$resp, resp)[1] # any valid ID works here as J and W are independent of the ID id <- subset2(ranef_g, resp = resp)$id[1] idresp <- paste0(id, usc(resp_g)) if (gtype == "mm") { ngf <- length(ranef_g$gcall[[1]]$groups) gf <- sdata[paste0("J_", idresp, "_", seq_len(ngf))] weights <- sdata[paste0("W_", idresp, "_", seq_len(ngf))] } else { gf <- sdata[paste0("J_", idresp)] weights <- list(rep(1, length(gf[[1]]))) } # generate draws for new levels args_new_rdraws <- nlist( ranef = ranef_g, gf, used_levels, old_levels, rdraws = rdraws, draws, sample_new_levels ) new_rdraws <- do_call(get_new_rdraws, args_new_rdraws) max_level <- attr(new_rdraws, "max_level") gf <- attr(new_rdraws, "gf") rdraws <- cbind(rdraws, new_rdraws) # keep only those levels actually used in the current data levels <- unique(unlist(gf)) rdraws <- subset_levels(rdraws, levels, nranef) # store all information required in 'prepare_predictions_re' out[[g]]$ranef <- ranef_g out[[g]]$rdraws <- rdraws out[[g]]$levels <- levels out[[g]]$nranef <- nranef out[[g]]$max_level <- max_level out[[g]]$gf <- gf out[[g]]$weights <- weights } out } # prepare predictions of group-level effects # @param prep_ranef a named list with one element per group containing # posterior draws of levels as well as additional meta-data prepare_predictions_re <- function(bterms, sdata, prep_ranef = list(), sample_new_levels = "uncertainty", ...) 
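# A sketch of the reordering done in 'prepare_predictions_ranef' above: draws
# of group-level effects are stored with all levels of the first coefficient
# first (column major) and are rearranged so that the coefficients of each
# level sit next to each other (row major), which simplifies later subsetting.
# Names and values are illustrative.
col_names <- c("r_g[1,Int]", "r_g[2,Int]", "r_g[3,Int]",
               "r_g[1,x]",   "r_g[2,x]",   "r_g[3,x]")
rdraws <- matrix(seq_len(12), nrow = 2, dimnames = list(NULL, col_names))
nranef <- 2
nlevels <- ncol(rdraws) / nranef
sort_levels <- unlist(lapply(seq_len(nlevels), function(l)
  seq(l, ncol(rdraws), by = nlevels)
))
colnames(rdraws[, sort_levels, drop = FALSE])
#> "r_g[1,Int]" "r_g[1,x]" "r_g[2,Int]" "r_g[2,x]" "r_g[3,Int]" "r_g[3,x]"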
{ out <- list() if (!length(prep_ranef)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) ranef_px <- lapply(prep_ranef, "[[", "ranef") ranef_px <- do_call(rbind, ranef_px) ranef_px <- subset2(ranef_px, ls = px) if (!NROW(ranef_px)) { return(out) } groups <- unique(ranef_px$group) # assigning S4 objects requires initialisation of list elements out[c("Z", "Zsp", "Zcs")] <- list(named_list(groups)) for (g in groups) { # extract variables specific to group 'g' ranef_g <- prep_ranef[[g]]$ranef ranef_g_px <- subset2(ranef_g, ls = px) rdraws <- prep_ranef[[g]]$rdraws nranef <- prep_ranef[[g]]$nranef levels <- prep_ranef[[g]]$levels max_level <- prep_ranef[[g]]$max_level gf <- prep_ranef[[g]]$gf weights <- prep_ranef[[g]]$weights # TODO: define 'select' according to parameter names not by position # store draws and corresponding data in the output # special group-level terms (mo, me, mi) ranef_g_px_sp <- subset2(ranef_g_px, type = "sp") if (nrow(ranef_g_px_sp)) { Z <- matrix(1, length(gf[[1]])) out[["Zsp"]][[g]] <- prepare_Z(Z, gf, max_level, weights) for (co in ranef_g_px_sp$coef) { # select from all varying effects of that group select <- find_rows(ranef_g, ls = px) & ranef_g$coef == co & ranef_g$type == "sp" select <- which(select) select <- select + nranef * (seq_along(levels) - 1) out[["rsp"]][[co]][[g]] <- rdraws[, select, drop = FALSE] } } # category specific group-level terms ranef_g_px_cs <- subset2(ranef_g_px, type = "cs") if (nrow(ranef_g_px_cs)) { # all categories share the same Z matrix ranef_g_px_cs_1 <- ranef_g_px_cs[grepl("\\[1\\]$", ranef_g_px_cs$coef), ] Znames <- paste0("Z_", ranef_g_px_cs_1$id, p, "_", ranef_g_px_cs_1$cn) Z <- do_call(cbind, sdata[Znames]) out[["Zcs"]][[g]] <- prepare_Z(Z, gf, max_level, weights) for (i in seq_len(sdata$nthres)) { index <- paste0("\\[", i, "\\]$") # select from all varying effects of that group select <- find_rows(ranef_g, ls = px) & grepl(index, ranef_g$coef) & ranef_g$type == "cs" select <- which(select) select <- as.vector(outer(select, nranef * (seq_along(levels) - 1), "+")) out[["rcs"]][[g]][[i]] <- rdraws[, select, drop = FALSE] } } # basic group-level terms ranef_g_px_basic <- subset2(ranef_g_px, type = c("", "mmc")) if (nrow(ranef_g_px_basic)) { Znames <- paste0("Z_", ranef_g_px_basic$id, p, "_", ranef_g_px_basic$cn) if (ranef_g_px_basic$gtype[1] == "mm") { ng <- length(ranef_g_px_basic$gcall[[1]]$groups) Z <- vector("list", ng) for (k in seq_len(ng)) { Z[[k]] <- do_call(cbind, sdata[paste0(Znames, "_", k)]) } } else { Z <- do_call(cbind, sdata[Znames]) } out[["Z"]][[g]] <- prepare_Z(Z, gf, max_level, weights) # select from all varying effects of that group select <- find_rows(ranef_g, ls = px) & ranef_g$type %in% c("", "mmc") select <- which(select) select <- as.vector(outer(select, nranef * (seq_along(levels) - 1), "+")) out[["r"]][[g]] <- rdraws[, select, drop = FALSE] } } out } # prepare predictions of autocorrelation parameters # @param nat_cov extract terms for covariance matrices of natural residuals? prepare_predictions_ac <- function(bterms, draws, sdata, oos = NULL, nat_cov = FALSE, new = FALSE, ...) 
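# A standalone sketch of the latent-residual step handled below: given draws
# of an AR(1) coefficient and a residual SD, build the implied correlation
# matrix of one time-series group and draw correlated residuals from it.
# MASS::mvrnorm stands in for the internal multivariate-normal sampler;
# all values are illustrative.
library(MASS)
set.seed(4)
ar <- 0.6; sderr <- 0.5; nobs <- 5
cor_ar1 <- ar^abs(outer(seq_len(nobs), seq_len(nobs), "-"))
cov_ar1 <- sderr^2 * cor_ar1
err <- mvrnorm(1, mu = rep(0, nobs), Sigma = cov_ar1)
round(err, 2)  # one draw of 5 correlated residuals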
{ out <- list() nat_cov <- as_one_logical(nat_cov) acef <- tidy_acef(bterms) acef <- subset2(acef, nat_cov = nat_cov) if (!NROW(acef)) { return(out) } out$acef <- acef p <- usc(combine_prefix(bterms)) out$N_tg <- sdata[[paste0("N_tg", p)]] if (has_ac_class(acef, "arma")) { acef_arma <- subset2(acef, class = "arma") out$Y <- sdata[[paste0("Y", p)]] if (!is.null(oos)) { if (any(oos > length(out$Y))) { stop2("'oos' should not contain integers larger than N.") } # .predictor_arma has special behavior for NA responses out$Y[oos] <- NA } out$J_lag <- sdata[[paste0("J_lag", p)]] if (acef_arma$p > 0) { ar_regex <- paste0("^ar", p, "\\[") out$ar <- prepare_draws(draws, ar_regex, regex = TRUE) } if (acef_arma$q > 0) { ma_regex <- paste0("^ma", p, "\\[") out$ma <- prepare_draws(draws, ma_regex, regex = TRUE) } } if (has_ac_class(acef, "cosy")) { cosy_regex <- paste0("^cosy", p, "$") out$cosy <- prepare_draws(draws, cosy_regex, regex = TRUE) } if (use_ac_cov_time(acef)) { # prepare predictions for the covariance structures of time-series models out$begin_tg <- sdata[[paste0("begin_tg", p)]] out$end_tg <- sdata[[paste0("end_tg", p)]] } if (has_ac_latent_residuals(bterms)) { err_regex <- paste0("^err", p, "\\[") has_err <- any(grepl(err_regex, colnames(draws))) if (has_err && !new) { out$err <- prepare_draws(draws, err_regex, regex = TRUE) } else { if (!use_ac_cov_time(acef)) { stop2("Cannot predict new latent residuals ", "when using cov = FALSE in autocor terms.") } # need to sample correlated residuals out$err <- matrix(nrow = nrow(draws), ncol = length(out$Y)) sderr_regex <- paste0("^sderr", p, "$") out$sderr <- prepare_draws(draws, sderr_regex, regex = TRUE) for (i in seq_len(out$N_tg)) { obs <- with(out, begin_tg[i]:end_tg[i]) zeros <- rep(0, length(obs)) cov <- get_cov_matrix_ac(list(ac = out), obs, latent = TRUE) .err <- function(s) rmulti_normal(1, zeros, Sigma = cov[s, , ]) out$err[, obs] <- rblapply(seq_rows(draws), .err) } } } if (has_ac_class(acef, "sar")) { lagsar_regex <- paste0("^lagsar", p, "$") errorsar_regex <- paste0("^errorsar", p, "$") out$lagsar <- prepare_draws(draws, lagsar_regex, regex = TRUE) out$errorsar <- prepare_draws(draws, errorsar_regex, regex = TRUE) out$Msar <- sdata[[paste0("Msar", p)]] } if (has_ac_class(acef, "car")) { acef_car <- subset2(acef, class = "car") if (new && acef_car$gr == "NA") { stop2("Without a grouping factor, CAR models cannot handle newdata.") } gcar <- sdata[[paste0("Jloc", p)]] Zcar <- matrix(rep(1, length(gcar))) out$Zcar <- prepare_Z(Zcar, list(gcar)) rcar_regex <- paste0("^rcar", p, "\\[") rcar <- prepare_draws(draws, rcar_regex, regex = TRUE) rcar <- rcar[, unique(gcar), drop = FALSE] out$rcar <- rcar } if (has_ac_class(acef, "fcor")) { out$Mfcor <- sdata[[paste0("Mfcor", p)]] } out } prepare_predictions_offset <- function(bterms, sdata, ...) { p <- usc(combine_prefix(bterms)) sdata[[paste0("offsets", p)]] } # prepare predictions of ordinal thresholds prepare_predictions_thres <- function(bterms, draws, sdata, ...) { out <- list() if (!is_ordinal(bterms$family)) { return(out) } resp <- usc(bterms$resp) out$nthres <- sdata[[paste0("nthres", resp)]] out$Jthres <- sdata[[paste0("Jthres", resp)]] p <- usc(combine_prefix(bterms)) thres_regex <- paste0("^b", p, "_Intercept\\[") out$thres <- prepare_draws(draws, thres_regex, regex = TRUE) out } # prepare predictions of baseline functions for the cox model prepare_predictions_bhaz <- function(bterms, draws, sdata, ...) 
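# A sketch of the matrix product used below for the Cox model: draws of the
# baseline-hazard spline coefficients times the spline basis evaluated at the
# observed times yield draws of the baseline hazard at each observation.
# splines::bs merely stands in for whatever basis brms constructs internally;
# all values are illustrative.
library(splines)
set.seed(5)
times <- seq(0.1, 5, length.out = 20)
Zbhaz <- bs(times, df = 5, intercept = TRUE)  # 20 x 5 basis matrix
sbhaz <- matrix(rexp(4 * 5), nrow = 4)        # 4 draws x 5 coefficients
bhaz <- tcrossprod(sbhaz, Zbhaz)              # 4 draws x 20 observations
dim(bhaz)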
{ if (!is_cox(bterms$family)) { return(NULL) } out <- list() p <- usc(combine_prefix(bterms)) sbhaz_regex <- paste0("^sbhaz", p) sbhaz <- prepare_draws(draws, sbhaz_regex, regex = TRUE) Zbhaz <- sdata[[paste0("Zbhaz", p)]] out$bhaz <- tcrossprod(sbhaz, Zbhaz) Zcbhaz <- sdata[[paste0("Zcbhaz", p)]] out$cbhaz <- tcrossprod(sbhaz, Zcbhaz) out } # extract data mainly related to the response variable prepare_predictions_data <- function(bterms, sdata, data, stanvars = NULL, ...) { resp <- usc(combine_prefix(bterms)) vars <- c( "Y", "trials", "ncat", "nthres", "se", "weights", "denom", "dec", "cens", "rcens", "lb", "ub" ) vars <- paste0(vars, resp) vars <- intersect(vars, names(sdata)) # variables of variable length need to be handled via regular expression escaped_resp <- escape_all(resp) vl_vars <- c("vreal", "vint") vl_vars <- regex_or(vl_vars) vl_vars <- paste0("^", vl_vars, "[[:digit:]]+", escaped_resp, "$") vl_vars <- str_subset(names(sdata), vl_vars) vars <- union(vars, vl_vars) out <- sdata[vars] # remove resp suffix from names to simplify post-processing names(out) <- sub(paste0(escaped_resp, "$"), "", names(out)) if (length(stanvars)) { stopifnot(is.stanvars(stanvars)) out[names(stanvars)] <- sdata[names(stanvars)] } out } # choose number of observations to be used in post-processing methods choose_N <- function(prep) { stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep)) if (!is.null(prep$ac$N_tg)) prep$ac$N_tg else prep$nobs } # create pseudo brmsprep objects for components of mixture models # @param comp the mixture component number # @param draw_ids see predict_mixture pseudo_prep_for_mixture <- function(prep, comp, draw_ids = NULL) { stopifnot(is.brmsprep(prep), is.mixfamily(prep$family)) if (!is.null(draw_ids)) { ndraws <- length(draw_ids) } else { ndraws <- prep$ndraws } out <- list( family = prep$family$mix[[comp]], ndraws = ndraws, nobs = prep$nobs, data = prep$data ) out$family$fun <- out$family$family for (dp in valid_dpars(out$family)) { out$dpars[[dp]] <- prep$dpars[[paste0(dp, comp)]] if (length(draw_ids) && length(out$dpars[[dp]]) > 1L) { out$dpars[[dp]] <- p(out$dpars[[dp]], draw_ids, row = TRUE) } } if (is_ordinal(out$family)) { out$thres <- prep$thres[[paste0("mu", comp)]] } if (is_cox(out$family)) { out$bhaz <- prep$bhaz[[paste0("mu", comp)]] } # weighting should happen after computing the mixture out$data$weights <- NULL structure(out, class = "brmsprep") } # take relevant cols of a matrix of group-level terms # if only a subset of levels is provided (for newdata) # @param x a matrix typically draws of r or Z design matrices # draws need to be stored in row major order # @param levels grouping factor levels to keep # @param nranef number of group-level effects subset_levels <- function(x, levels, nranef) { take_levels <- ulapply(levels, function(l) ((l - 1) * nranef + 1):(l * nranef) ) x[, take_levels, drop = FALSE] } # transform x from column to row major order # rows represent levels and columns represent effects # @param x a matrix of draws of group-level parameters # @param nranef number of group-level effects column_to_row_major_order <- function(x, nranef) { nlevels <- ncol(x) / nranef sort_levels <- ulapply(seq_len(nlevels), function(l) seq(l, ncol(x), by = nlevels) ) x[, sort_levels, drop = FALSE] } # prepare group-level design matrices for use in 'predictor' # @param Z (list of) matrices to be prepared # @param gf (list of) vectors containing grouping factor values # @param weights optional (list of) weights of the same length as gf # @param max_level 
maximal level of 'gf' # @return a sparse matrix representation of Z prepare_Z <- function(Z, gf, max_level = NULL, weights = NULL) { if (!is.list(Z)) { Z <- list(Z) } if (!is.list(gf)) { gf <- list(gf) } if (is.null(weights)) { weights <- rep(1, length(gf[[1]])) } if (!is.list(weights)) { weights <- list(weights) } if (is.null(max_level)) { max_level <- max(unlist(gf)) } levels <- unique(unlist(gf)) nranef <- ncol(Z[[1]]) Z <- mapply( expand_matrix, A = Z, x = gf, weights = weights, MoreArgs = nlist(max_level) ) Z <- Reduce("+", Z) subset_levels(Z, levels, nranef) } # expand a matrix into a sparse matrix of higher dimension # @param A matrix to be expanded # @param x levels to expand the matrix # @param max_level maximal number of levels that x can take on # @param weights weights to apply to rows of A before expanding # @param a sparse matrix of dimension nrow(A) x (ncol(A) * max_level) expand_matrix <- function(A, x, max_level = max(x), weights = 1) { stopifnot(is.matrix(A)) stopifnot(length(x) == nrow(A)) stopifnot(all(is_wholenumber(x) & x > 0)) stopifnot(length(weights) %in% c(1, nrow(A), prod(dim(A)))) A <- A * as.vector(weights) K <- ncol(A) i <- rep(seq_along(x), each = K) make_j <- function(n, K, x) K * (x[n] - 1) + 1:K j <- ulapply(seq_along(x), make_j, K = K, x = x) Matrix::sparseMatrix( i = i, j = j, x = as.vector(t(A)), dims = c(nrow(A), ncol(A) * max_level) ) } # generate draws for new group levels # @param ranef 'ranef_frame' object of only a single grouping variable # @param gf list of vectors of level indices in the current data # @param rdraws matrix of group-level draws in row major order # @param used_levels names of levels used in the current data # @param old_levels names of levels used in the original data # @param sample_new_levels specifies the way in which new draws are generated # @param draws optional matrix of draws from all model parameters # @return a matrix of draws for new group levels get_new_rdraws <- function(ranef, gf, rdraws, used_levels, old_levels, sample_new_levels, draws = NULL) { snl_options <- c("uncertainty", "gaussian", "old_levels") sample_new_levels <- match.arg(sample_new_levels, snl_options) g <- unique(ranef$group) stopifnot(length(g) == 1L) stopifnot(is.list(gf)) used_by_per_level <- attr(used_levels, "by") old_by_per_level <- attr(old_levels, "by") new_levels <- setdiff(used_levels, old_levels) nranef <- nrow(ranef) nlevels <- length(old_levels) max_level <- nlevels out <- vector("list", length(gf)) for (i in seq_along(gf)) { has_new_levels <- any(gf[[i]] > nlevels) if (has_new_levels) { new_indices <- sort(setdiff(gf[[i]], seq_len(nlevels))) out[[i]] <- matrix(NA, nrow(rdraws), nranef * length(new_indices)) if (sample_new_levels == "uncertainty") { for (j in seq_along(new_indices)) { # selected levels need to be the same for all varying effects # to correctly take their correlations into account if (length(old_by_per_level)) { # select from all levels matching the 'by' variable new_by <- used_by_per_level[used_levels == new_levels[j]] possible_levels <- old_levels[old_by_per_level == new_by] possible_levels <- which(old_levels %in% possible_levels) sel_levels <- sample(possible_levels, NROW(rdraws), TRUE) } else { # select from all levels sel_levels <- sample(seq_len(nlevels), NROW(rdraws), TRUE) } for (k in seq_len(nranef)) { for (s in seq_rows(rdraws)) { sel <- (sel_levels[s] - 1) * nranef + k out[[i]][s, (j - 1) * nranef + k] <- rdraws[s, sel] } } } } else if (sample_new_levels == "old_levels") { for (j in seq_along(new_indices)) { # 
choose an existing person to take the parameters from if (length(old_by_per_level)) { # select from all levels matching the 'by' variable new_by <- used_by_per_level[used_levels == new_levels[j]] possible_levels <- old_levels[old_by_per_level == new_by] possible_levels <- which(old_levels %in% possible_levels) sel_level <- sample(possible_levels, 1) } else { # select from all levels sel_level <- sample(seq_len(nlevels), 1) } for (k in seq_len(nranef)) { sel <- (sel_level - 1) * nranef + k out[[i]][, (j - 1) * nranef + k] <- rdraws[, sel] } } } else if (sample_new_levels == "gaussian") { if (any(!ranef$dist %in% "gaussian")) { stop2("Option sample_new_levels = 'gaussian' is not ", "available for non-gaussian group-level effects.") } for (j in seq_along(new_indices)) { # extract hyperparameters used to compute the covariance matrix if (length(old_by_per_level)) { new_by <- used_by_per_level[used_levels == new_levels[j]] rnames <- as.vector(get_rnames(ranef, bylevels = new_by)) } else { rnames <- get_rnames(ranef) } sd_pars <- paste0("sd_", g, "__", rnames) sd_draws <- prepare_draws(draws, sd_pars) cor_type <- paste0("cor_", g) cor_pars <- get_cornames(rnames, cor_type, brackets = FALSE) cor_draws <- matrix(0, nrow(sd_draws), length(cor_pars)) for (k in seq_along(cor_pars)) { if (cor_pars[k] %in% colnames(draws)) { cor_draws[, k] <- prepare_draws(draws, cor_pars[k]) } } cov_matrix <- get_cov_matrix(sd_draws, cor_draws) # sample new levels from the normal distribution # implied by the covariance matrix indices <- ((j - 1) * nranef + 1):(j * nranef) out[[i]][, indices] <- t(apply( cov_matrix, 1, rmulti_normal, n = 1, mu = rep(0, length(sd_pars)) )) } } max_level <- max_level + length(new_indices) } else { out[[i]] <- matrix(nrow = nrow(rdraws), ncol = 0) } } out <- do_call(cbind, out) structure(out, gf = gf, max_level = max_level) } # prepare draws of selected variables prepare_draws <- function(x, variable, ...) { x <- subset_draws(x, variable = variable, ...) # brms still assumes standard dropping behavior in many places # and so keeping the posterior format is dangerous at the moment unclass_draws(x) } # compute point estimates of posterior draws # currently used primarily for 'loo_subsample' # @param draws matrix of posterior draws # @param point_estimate optional name of the point estimate to be computed # @return a draws_matrix with one row point_draws <- function(draws, point_estimate = NULL) { if (is.null(point_estimate)) { return(draws) } point_estimate <- match.arg(point_estimate, c("mean", "median")) variables <- colnames(draws) if (point_estimate == "mean") { draws <- matrixStats::colMeans2(draws) } else if (point_estimate == "median") { draws <- matrixStats::colMedians(draws) } draws <- t(draws) colnames(draws) <- variables as_draws_matrix(draws) } is.brmsprep <- function(x) { inherits(x, "brmsprep") } is.mvbrmsprep <- function(x) { inherits(x, "mvbrmsprep") } is.bprepl <- function(x) { inherits(x, "bprepl") } is.bprepnl <- function(x) { inherits(x, "bprepnl") } #' Prepare Predictions #' #' This method helps in preparing \pkg{brms} models for certin post-processing #' tasks most notably various forms of predictions. Unless you are a package #' developer, you will rarely need to call \code{prepare_predictions} directly. #' #' @name prepare_predictions #' @aliases prepare_predictions.brmsfit extract_draws #' #' @param x An \R object typically of class \code{'brmsfit'}. #' @param newdata An optional data.frame for which to evaluate predictions. 
If #' \code{NULL} (default), the original data of the model is used. #' \code{NA} values within factors are interpreted as if all dummy #' variables of this factor are zero. This allows, for instance, making #' predictions of the grand mean when using sum coding. #' @param re_formula formula containing group-level effects to be considered in #' the prediction. If \code{NULL} (default), include all group-level effects; #' if \code{NA}, include no group-level effects. #' @param allow_new_levels A flag indicating if new levels of group-level #' effects are allowed (defaults to \code{FALSE}). Only relevant if #' \code{newdata} is provided. #' @param sample_new_levels Indicates how to sample new levels for grouping #' factors specified in \code{re_formula}. This argument is only relevant if #' \code{newdata} is provided and \code{allow_new_levels} is set to #' \code{TRUE}. If \code{"uncertainty"} (default), each posterior sample for a #' new level is drawn from the posterior draws of a randomly chosen existing #' level. Each posterior sample for a new level may be drawn from a different #' existing level such that the resulting set of new posterior draws #' represents the variation across existing levels. If \code{"gaussian"}, #' sample new levels from the (multivariate) normal distribution implied by the #' group-level standard deviations and correlations. This option may be useful #' for conducting Bayesian power analysis or predicting new levels in #' situations where relatively few levels were observed in the original data. If #' \code{"old_levels"}, directly sample new levels from the existing levels, #' where a new level is assigned all of the posterior draws of the same #' (randomly chosen) existing level. #' @param newdata2 A named \code{list} of objects containing new data, which #' cannot be passed via argument \code{newdata}. Required for some objects #' used in autocorrelation structures, or \code{\link{stanvars}}. #' @param new_objects Deprecated alias of \code{newdata2}. #' @param incl_autocor A flag indicating if correlation structures originally #' specified via \code{autocor} should be included in the predictions. #' Defaults to \code{TRUE}. #' @param offset Logical; Indicates if offsets should be included in the #' predictions. Defaults to \code{TRUE}. #' @param oos Optional indices of observations for which to compute #' out-of-sample rather than in-sample predictions. Only required in models #' that make use of response values to make predictions, that is, currently #' only ARMA models. #' @param smooths_only Logical; If \code{TRUE}, only predictions related to the #' smooth terms will be computed. #' @param resp Optional names of response variables. If specified, predictions #' are performed only for the specified response variables. #' @param ndraws Positive integer indicating how many posterior draws should #' be used. If \code{NULL} (the default) all draws are used. Ignored if #' \code{draw_ids} is not \code{NULL}. #' @param draw_ids An integer vector specifying the posterior draws to be used. #' If \code{NULL} (the default), all draws are used. #' @param nsamples Deprecated alias of \code{ndraws}. #' @param subset Deprecated alias of \code{draw_ids}. #' @param nug Small positive number for Gaussian process terms only. For #' numerical reasons, the covariance matrix of a Gaussian process might not be #' positive definite. Adding a very small number to the matrix's diagonal #' often solves this problem. If \code{NULL} (the default), \code{nug} is #' chosen internally. 
#' @param point_estimate Shall the returned object contain only point estimates #' of the parameters instead of their posterior draws? Defaults to #' \code{NULL} in which case no point estimate is computed. Alternatively, may #' be set to \code{"mean"} or \code{"median"}. This argument is primarily #' implemented to ensure compatibility with the \code{\link{loo_subsample}} #' method. #' @param ... Further arguments passed to \code{\link{validate_newdata}}. #' #' @return An object of class \code{'brmsprep'} or \code{'mvbrmsprep'}, #' depending on whether a univariate or multivariate model is passed. #' #' @export prepare_predictions <- function(x, ...) { UseMethod("prepare_predictions") } #' @export prepare_predictions.default <- function(x, ...) { NULL } # the name 'extract_draws' is deprecated as of brms 2.12.6 # remove it eventually in brms 3.0 #' @export extract_draws <- function(x, ...) { warning2("Method 'extract_draws' is deprecated. ", "Please use 'prepare_predictions' instead.") UseMethod("prepare_predictions") } brms/R/families.R0000644000175000017500000017166514111751666013514 0ustar nileshnilesh#' Special Family Functions for \pkg{brms} Models #' #' Family objects provide a convenient way to specify the details of the models #' used by many model fitting functions. The family functions presented here are #' for use with \pkg{brms} only and will **not** work with other model #' fitting functions such as \code{glm} or \code{glmer}. #' However, the standard family functions as described in #' \code{\link[stats:family]{family}} will work with \pkg{brms}. #' You can also specify custom families for use in \pkg{brms} with #' the \code{\link{custom_family}} function. #' #' @param family A character string naming the distribution of the response #' variable be used in the model. Currently, the following families are #' supported: \code{gaussian}, \code{student}, \code{binomial}, #' \code{bernoulli}, \code{poisson}, \code{negbinomial}, \code{geometric}, #' \code{Gamma}, \code{skew_normal}, \code{lognormal}, #' \code{shifted_lognormal}, \code{exgaussian}, \code{wiener}, #' \code{inverse.gaussian}, \code{exponential}, \code{weibull}, #' \code{frechet}, \code{Beta}, \code{dirichlet}, \code{von_mises}, #' \code{asym_laplace}, \code{gen_extreme_value}, \code{categorical}, #' \code{multinomial}, \code{cumulative}, \code{cratio}, \code{sratio}, #' \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, #' \code{hurdle_gamma}, \code{hurdle_lognormal}, #' \code{zero_inflated_binomial}, \code{zero_inflated_beta}, #' \code{zero_inflated_negbinomial}, \code{zero_inflated_poisson}, and #' \code{zero_one_inflated_beta}. #' @param link A specification for the model link function. This can be a #' name/expression or character string. See the 'Details' section for more #' information on link functions supported by each family. #' @param link_sigma Link of auxiliary parameter \code{sigma} if being predicted. #' @param link_shape Link of auxiliary parameter \code{shape} if being predicted. #' @param link_nu Link of auxiliary parameter \code{nu} if being predicted. #' @param link_phi Link of auxiliary parameter \code{phi} if being predicted. #' @param link_kappa Link of auxiliary parameter \code{kappa} if being predicted. #' @param link_beta Link of auxiliary parameter \code{beta} if being predicted. #' @param link_zi Link of auxiliary parameter \code{zi} if being predicted. #' @param link_hu Link of auxiliary parameter \code{hu} if being predicted. 
#' @param link_zoi Link of auxiliary parameter \code{zoi} if being predicted. #' @param link_coi Link of auxiliary parameter \code{coi} if being predicted. #' @param link_disc Link of auxiliary parameter \code{disc} if being predicted. #' @param link_bs Link of auxiliary parameter \code{bs} if being predicted. #' @param link_ndt Link of auxiliary parameter \code{ndt} if being predicted. #' @param link_bias Link of auxiliary parameter \code{bias} if being predicted. #' @param link_alpha Link of auxiliary parameter \code{alpha} if being predicted. #' @param link_quantile Link of auxiliary parameter \code{quantile} if being predicted. #' @param link_xi Link of auxiliary parameter \code{xi} if being predicted. #' @param threshold A character string indicating the type #' of thresholds (i.e. intercepts) used in an ordinal model. #' \code{"flexible"} provides the standard unstructured thresholds, #' \code{"equidistant"} restricts the distance between #' consecutive thresholds to the same value, and #' \code{"sum_to_zero"} ensures the thresholds sum to zero. #' @param refcat Optional name of the reference response category used in #' categorical, multinomial, and dirichlet models. If \code{NULL} (the #' default), the first category is used as the reference. If \code{NA}, all #' categories will be predicted, which requires strong priors or carefully #' specified predictor terms in order to lead to an identified model. #' @param bhaz Currently for experimental purposes only. #' #' @details #' Below, we list common use cases for the different families. #' This list is not ment to be exhaustive. #' \itemize{ #' \item{Family \code{gaussian} can be used for linear regression.} #' #' \item{Family \code{student} can be used for robust linear regression #' that is less influenced by outliers.} #' #' \item{Family \code{skew_normal} can handle skewed responses in linear #' regression.} #' #' \item{Families \code{poisson}, \code{negbinomial}, and \code{geometric} #' can be used for regression of unbounded count data.} #' #' \item{Families \code{bernoulli} and \code{binomial} can be used for #' binary regression (i.e., most commonly logistic regression).} #' #' \item{Families \code{categorical} and \code{multinomial} can be used for #' multi-logistic regression when there are more than two possible outcomes.} #' #' \item{Families \code{cumulative}, \code{cratio} ('continuation ratio'), #' \code{sratio} ('stopping ratio'), and \code{acat} ('adjacent category') #' leads to ordinal regression.} #' #' \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, #' \code{lognormal}, \code{frechet}, \code{inverse.gaussian}, and \code{cox} #' (Cox proportional hazards model) can be used (among others) for #' time-to-event regression also known as survival regression.} #' #' \item{Families \code{weibull}, \code{frechet}, and \code{gen_extreme_value} #' ('generalized extreme value') allow for modeling extremes.} #' #' \item{Families \code{beta} and \code{dirichlet} can be used to model #' responses representing rates or probabilities.} #' #' \item{Family \code{asym_laplace} allows for quantile regression when fixing #' the auxiliary \code{quantile} parameter to the quantile of interest.} #' #' \item{Family \code{exgaussian} ('exponentially modified Gaussian') and #' \code{shifted_lognormal} are especially suited to model reaction times.} #' #' \item{Family \code{wiener} provides an implementation of the Wiener #' diffusion model. 
For this family, the main formula predicts the drift #' parameter 'delta' and all other parameters are modeled as auxiliary parameters #' (see \code{\link{brmsformula}} for details).} #' #' \item{Families \code{hurdle_poisson}, \code{hurdle_negbinomial}, #' \code{hurdle_gamma}, \code{hurdle_lognormal}, \code{zero_inflated_poisson}, #' \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, #' \code{zero_inflated_beta}, and \code{zero_one_inflated_beta} #' allow to estimate zero-inflated and hurdle models. #' These models can be very helpful when there are many zeros in the data #' (or ones in case of one-inflated models) #' that cannot be explained by the primary distribution of the response.} #' } #' #' Below, we list all possible links for each family. #' The first link mentioned for each family is the default. #' \itemize{ #' \item{Families \code{gaussian}, \code{student}, \code{skew_normal}, #' \code{exgaussian}, \code{asym_laplace}, and \code{gen_extreme_value} #' support the links (as names) \code{identity}, \code{log}, \code{inverse}, #' and \code{softplus}.} #' #' \item{Families \code{poisson}, \code{negbinomial}, \code{geometric}, #' \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, #' \code{hurdle_poisson}, and \code{hurdle_negbinomial} support #' \code{log}, \code{identity}, \code{sqrt}, and \code{softplus}.} #' #' \item{Families \code{binomial}, \code{bernoulli}, \code{Beta}, #' \code{zero_inflated_binomial}, \code{zero_inflated_beta}, #' and \code{zero_one_inflated_beta} support \code{logit}, #' \code{probit}, \code{probit_approx}, \code{cloglog}, #' \code{cauchit}, and \code{identity}.} #' #' \item{Families \code{cumulative}, \code{cratio}, \code{sratio}, #' and \code{acat} support \code{logit}, \code{probit}, #' \code{probit_approx}, \code{cloglog}, and \code{cauchit}.} #' #' \item{Families \code{categorical}, \code{multinomial}, and \code{dirichlet} #' support \code{logit}.} #' #' \item{Families \code{Gamma}, \code{weibull}, \code{exponential}, #' \code{frechet}, and \code{hurdle_gamma} support #' \code{log}, \code{identity}, \code{inverse}, and \code{softplus}.} #' #' \item{Families \code{lognormal} and \code{hurdle_lognormal} #' support \code{identity} and \code{inverse}.} #' #' \item{Family \code{inverse.gaussian} supports \code{1/mu^2}, #' \code{inverse}, \code{identity}, \code{log}, and \code{softplus}.} #' #' \item{Family \code{von_mises} supports \code{tan_half} and #' \code{identity}.} #' #' \item{Family \code{cox} supports \code{log}, \code{identity}, #' and \code{softplus} for the proportional hazards parameter.} #' #' \item{Family \code{wiener} supports \code{identity}, \code{log}, #' and \code{softplus} for the main parameter which represents the #' drift rate.} #' } #' #' Please note that when calling the \code{\link[stats:family]{Gamma}} family #' function of the \pkg{stats} package, the default link will be #' \code{inverse} instead of \code{log} although the latter is the default in #' \pkg{brms}. Also, when using the family functions \code{gaussian}, #' \code{binomial}, \code{poisson}, and \code{Gamma} of the \pkg{stats} #' package (see \code{\link[stats:family]{family}}), special link functions #' such as \code{softplus} or \code{cauchit} won't work. In this case, you #' have to use \code{brmsfamily} to specify the family with corresponding link #' function. 
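#'
#' As a brief illustration (a sketch only; the object name is arbitrary and the
#' printed output may differ across \pkg{brms} versions), family-specific link
#' functions can be set and inspected as follows:
#' \preformatted{
#' fam <- weibull(link = "log", link_shape = "log")
#' fam$link        # link of the main parameter: "log"
#' fam$link_shape  # link of the auxiliary parameter 'shape': "log"
#' print(fam, links = TRUE)
#' }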
#' #' @seealso \code{\link[brms:brm]{brm}}, #' \code{\link[stats:family]{family}}, #' \code{\link{customfamily}} #' #' @examples #' # create a family object #' (fam1 <- student("log")) #' # alternatively use the brmsfamily function #' (fam2 <- brmsfamily("student", "log")) #' # both leads to the same object #' identical(fam1, fam2) #' #' @export brmsfamily <- function(family, link = NULL, link_sigma = "log", link_shape = "log", link_nu = "logm1", link_phi = "log", link_kappa = "log", link_beta = "log", link_zi = "logit", link_hu = "logit", link_zoi = "logit", link_coi = "logit", link_disc = "log", link_bs = "log", link_ndt = "log", link_bias = "logit", link_xi = "log1p", link_alpha = "identity", link_quantile = "logit", threshold = "flexible", refcat = NULL, bhaz = NULL) { slink <- substitute(link) .brmsfamily( family, link = link, slink = slink, link_sigma = link_sigma, link_shape = link_shape, link_nu = link_nu, link_phi = link_phi, link_kappa = link_kappa, link_beta = link_beta, link_zi = link_zi, link_hu = link_hu, link_zoi = link_zoi, link_coi = link_coi, link_disc = link_disc, link_bs = link_bs, link_ndt = link_ndt, link_bias = link_bias, link_alpha = link_alpha, link_xi = link_xi, link_quantile = link_quantile, threshold = threshold, refcat = refcat, bhaz = bhaz ) } # helper function to prepare brmsfamily objects # @param family character string naming the model family # @param link character string naming the link function # @param slink can be used with substitute(link) for # non-standard evaluation of the link function # @param threshold threshold type for ordinal models # @param ... link functions (as character strings) of parameters # @return an object of 'brmsfamily' which inherits from 'family' .brmsfamily <- function(family, link = NULL, slink = link, threshold = "flexible", refcat = NULL, bhaz = NULL, ...) { family <- tolower(as_one_character(family)) aux_links <- list(...) pattern <- c("^normal$", "^zi_", "^hu_") replacement <- c("gaussian", "zero_inflated_", "hurdle_") family <- rename(family, pattern, replacement, fixed = FALSE) ok_families <- lsp("brms", pattern = "^\\.family_") ok_families <- sub("^\\.family_", "", ok_families) if (!family %in% ok_families) { stop2(family, " is not a supported family. 
Supported ", "families are:\n", collapse_comma(ok_families)) } family_info <- get(paste0(".family_", family))() ok_links <- family_info$links family_info$links <- NULL # non-standard evaluation of link if (!is.character(slink)) { slink <- deparse(slink) } if (!slink %in% ok_links) { if (is.character(link)) { slink <- link } else if (!length(link) || identical(link, NA)) { slink <- NA } } if (length(slink) != 1L) { stop2("Argument 'link' must be of length 1.") } if (is.na(slink)) { slink <- ok_links[1] } if (!slink %in% ok_links) { stop2("'", slink, "' is not a supported link ", "for family '", family, "'.\nSupported links are: ", collapse_comma(ok_links)) } out <- list( family = family, link = slink, linkfun = function(mu) link(mu, link = slink), linkinv = function(eta) ilink(eta, link = slink) ) out[names(family_info)] <- family_info class(out) <- c("brmsfamily", "family") for (dp in valid_dpars(out)) { alink <- as.character(aux_links[[paste0("link_", dp)]]) if (length(alink)) { alink <- as_one_character(alink) valid_links <- links_dpars(dp) if (!alink %in% valid_links) { stop2( "'", alink, "' is not a supported link ", "for parameter '", dp, "'.\nSupported links are: ", collapse_comma(valid_links) ) } out[[paste0("link_", dp)]] <- alink } } if (is_ordinal(out$family)) { # TODO: move specification of 'threshold' to the 'resp_thres' function? thres_options <- c("flexible", "equidistant", "sum_to_zero") out$threshold <- match.arg(threshold, thres_options) } if (conv_cats_dpars(out$family)) { if (!has_joint_link(out$family)) { out$refcat <- NA } else if (!is.null(refcat)) { out$refcat <- as_one_character(refcat, allow_na = TRUE) } } if (is_cox(out$family)) { if (!is.null(bhaz)) { if (!is.list(bhaz)) { stop2("'bhaz' should be a list.") } out$bhaz <- bhaz } else { out$bhaz <- list() } # set default arguments if (is.null(out$bhaz$df)) { out$bhaz$df <- 5L } if (is.null(out$bhaz$intercept)) { out$bhaz$intercept <- TRUE } } out } # checks and corrects validity of the model family # @param family Either a function, an object of class 'family' # or a character string of length one or two # @param link an optional character string naming the link function # ignored if family is a function or a family object # @param threshold optional character string specifying the threshold # type in ordinal models validate_family <- function(family, link = NULL, threshold = NULL) { if (is.function(family)) { family <- family() } if (!is(family, "brmsfamily")) { if (is.family(family)) { link <- family$link family <- family$family } if (is.character(family)) { if (is.null(link)) { link <- family[2] } family <- .brmsfamily(family[1], link = link) } else { stop2("Argument 'family' is invalid.") } } if (is_ordinal(family) && !is.null(threshold)) { # slot 'threshold' deprecated as of brms > 1.7.0 threshold <- match.arg(threshold, c("flexible", "equidistant")) family$threshold <- threshold } family } # extract special information of families # @param x object from which to extract # @param y name of the component to extract family_info <- function(x, y, ...) { UseMethod("family_info") } #' @export family_info.default <- function(x, y, ...) { x <- as.character(x) ulapply(x, .family_info, y = y, ...) } .family_info <- function(x, y, ...) 
{ x <- as_one_character(x) y <- as_one_character(y) if (y == "family") { return(x) } if (!nzchar(x)) { return(NULL) } info <- get(paste0(".family_", x))() if (y == "link") { out <- info$links[1] # default link } else { info$links <- NULL out <- info[[y]] } out } family_info.NULL <- function(x, y, ...) { NULL } #' @export family_info.list <- function(x, y, ...) { ulapply(x, family_info, y = y, ...) } #' @export family_info.family <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.brmsfamily <- function(x, y, ...) { y <- as_one_character(y) out <- x[[y]] if (is.null(out)) { # required for models fitted with brms 2.2 or earlier out <- family_info(x$family, y = y, ...) } out } #' @export family_info.mixfamily <- function(x, y, ...) { out <- lapply(x$mix, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.brmsformula <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.mvbrmsformula <- function(x, y, ...) { out <- lapply(x$forms, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.brmsterms <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.mvbrmsterms <- function(x, y, ...) { out <- lapply(x$terms, family_info, y = y, ...) combine_family_info(out, y = y) } #' @export family_info.btl <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.btnl <- function(x, y, ...) { family_info(x$family, y = y, ...) } #' @export family_info.brmsfit <- function(x, y, ...) { family_info(x$formula, y = y, ...) } # combine information from multiple families # provides special handling for certain elements combine_family_info <- function(x, y, ...) { y <- as_one_character(y) unite <- c( "dpars", "type", "specials", "include", "const", "cats", "ad", "normalized" ) if (y %in% c("family", "link")) { x <- unlist(x) } else if (y %in% unite) { x <- Reduce("union", x) } else if (y == "ybounds") { x <- do_call(rbind, x) x <- c(max(x[, 1]), min(x[, 2])) } else if (y == "closed") { # closed only if no bounds are open x <- do_call(rbind, x) clb <- !any(ulapply(x[, 1], isFALSE)) cub <- !any(ulapply(x[, 2], isFALSE)) x <- c(clb, cub) } else if (y == "thres") { # thresholds are the same across mixture components x <- x[[1]] } x } #' @rdname brmsfamily #' @export student <- function(link = "identity", link_sigma = "log", link_nu = "logm1") { slink <- substitute(link) .brmsfamily("student", link = link, slink = slink, link_sigma = link_sigma, link_nu = link_nu) } #' @rdname brmsfamily #' @export bernoulli <- function(link = "logit") { slink <- substitute(link) .brmsfamily("bernoulli", link = link, slink = slink) } #' @rdname brmsfamily #' @export negbinomial <- function(link = "log", link_shape = "log") { slink <- substitute(link) .brmsfamily("negbinomial", link = link, slink = slink, link_shape = link_shape) } # not yet officially supported # @rdname brmsfamily # @export negbinomial2 <- function(link = "log", link_sigma = "log") { slink <- substitute(link) .brmsfamily("negbinomial2", link = link, slink = slink, link_sigma = link_sigma) } #' @rdname brmsfamily #' @export geometric <- function(link = "log") { slink <- substitute(link) .brmsfamily("geometric", link = link, slink = slink) } # do not export yet! # @rdname brmsfamily # @export discrete_weibull <- function(link = "logit", link_shape = "log") { slink <- substitute(link) .brmsfamily("discrete_weibull", link = link, slink = slink, link_shape = link_shape) } # do not export yet! 
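# Editor's illustrative sketch: '.illustrate_family_info' is a hypothetical,
# unexported helper (not called anywhere in brms) showing how family_info()
# defined above resolves information for plain family names via .family_info().
.illustrate_family_info <- function() {
  list(
    default_link = family_info("poisson", "link"),      # first supported link, i.e. "log"
    dpars        = family_info("negbinomial", "dpars")  # distributional parameters, e.g. "mu" and "shape"
  )
}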
# @rdname brmsfamily # @export com_poisson <- function(link = "log", link_shape = "log") { slink <- substitute(link) .brmsfamily("com_poisson", link = link, slink = slink, link_shape = link_shape) } #' @rdname brmsfamily #' @export lognormal <- function(link = "identity", link_sigma = "log") { slink <- substitute(link) .brmsfamily("lognormal", link = link, slink = slink, link_sigma = link_sigma) } #' @rdname brmsfamily #' @export shifted_lognormal <- function(link = "identity", link_sigma = "log", link_ndt = "log") { slink <- substitute(link) .brmsfamily("shifted_lognormal", link = link, slink = slink, link_sigma = link_sigma, link_ndt = link_ndt) } #' @rdname brmsfamily #' @export skew_normal <- function(link = "identity", link_sigma = "log", link_alpha = "identity") { slink <- substitute(link) .brmsfamily("skew_normal", link = link, slink = slink, link_sigma = link_sigma, link_alpha = link_alpha) } #' @rdname brmsfamily #' @export exponential <- function(link = "log") { slink <- substitute(link) .brmsfamily("exponential", link = link, slink = slink) } #' @rdname brmsfamily #' @export weibull <- function(link = "log", link_shape = "log") { slink <- substitute(link) .brmsfamily("weibull", link = link, slink = slink, link_shape = link_shape) } #' @rdname brmsfamily #' @export frechet <- function(link = "log", link_nu = "logm1") { slink <- substitute(link) .brmsfamily("frechet", link = link, slink = slink, link_nu = link_nu) } #' @rdname brmsfamily #' @export gen_extreme_value <- function(link = "identity", link_sigma = "log", link_xi = "log1p") { slink <- substitute(link) .brmsfamily("gen_extreme_value", link = link, slink = slink, link_sigma = link_sigma, link_xi = link_xi) } #' @rdname brmsfamily #' @export exgaussian <- function(link = "identity", link_sigma = "log", link_beta = "log") { slink <- substitute(link) .brmsfamily("exgaussian", link = link, slink = slink, link_sigma = link_sigma, link_beta = link_beta) } #' @rdname brmsfamily #' @export wiener <- function(link = "identity", link_bs = "log", link_ndt = "log", link_bias = "logit") { slink <- substitute(link) .brmsfamily("wiener", link = link, slink = slink, link_bs = link_bs, link_ndt = link_ndt, link_bias = link_bias) } #' @rdname brmsfamily #' @export Beta <- function(link = "logit", link_phi = "log") { slink <- substitute(link) .brmsfamily("beta", link = link, slink = slink, link_phi = link_phi) } #' @rdname brmsfamily #' @export dirichlet <- function(link = "logit", link_phi = "log", refcat = NULL) { slink <- substitute(link) .brmsfamily("dirichlet", link = link, slink = slink, link_phi = link_phi, refcat = refcat) } # not yet exported # @rdname brmsfamily # @export dirichlet2 <- function(link = "log") { slink <- substitute(link) .brmsfamily("dirichlet2", link = link, slink = slink, refcat = NA) } #' @rdname brmsfamily #' @export von_mises <- function(link = "tan_half", link_kappa = "log") { slink <- substitute(link) .brmsfamily("von_mises", link = link, slink = slink, link_kappa = link_kappa) } #' @rdname brmsfamily #' @export asym_laplace <- function(link = "identity", link_sigma = "log", link_quantile = "logit") { slink <- substitute(link) .brmsfamily("asym_laplace", link = link, slink = slink, link_sigma = link_sigma, link_quantile = link_quantile) } # do not export yet! 
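# Editor's illustrative sketch: '.illustrate_asym_laplace_quantile' is a
# hypothetical, unexported helper (not used by brms) showing how asym_laplace()
# defined above is typically combined with a fixed 'quantile' distributional
# parameter to obtain quantile regression; passing the returned formula together
# with family = asym_laplace() to brm() would model the 0.9 quantile of y.
.illustrate_asym_laplace_quantile <- function() {
  bf(y ~ x, quantile = 0.9)
}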
# @rdname brmsfamily # @export zero_inflated_asym_laplace <- function(link = "identity", link_sigma = "log", link_quantile = "logit", link_zi = "logit") { slink <- substitute(link) .brmsfamily("zero_inflated_asym_laplace", link = link, slink = slink, link_sigma = link_sigma, link_quantile = link_quantile, link_zi = link_zi) } #' @rdname brmsfamily #' @export cox <- function(link = "log", bhaz = NULL) { slink <- substitute(link) .brmsfamily("cox", link = link, bhaz = bhaz) } #' @rdname brmsfamily #' @export hurdle_poisson <- function(link = "log") { slink <- substitute(link) .brmsfamily("hurdle_poisson", link = link, slink = slink) } #' @rdname brmsfamily #' @export hurdle_negbinomial <- function(link = "log", link_shape = "log", link_hu = "logit") { slink <- substitute(link) .brmsfamily("hurdle_negbinomial", link = link, slink = slink, link_shape = link_shape, link_hu = link_hu) } #' @rdname brmsfamily #' @export hurdle_gamma <- function(link = "log", link_shape = "log", link_hu = "logit") { slink <- substitute(link) .brmsfamily("hurdle_gamma", link = link, slink = slink, link_shape = link_shape, link_hu = link_hu) } #' @rdname brmsfamily #' @export hurdle_lognormal <- function(link = "identity", link_sigma = "log", link_hu = "logit") { slink <- substitute(link) .brmsfamily("hurdle_lognormal", link = link, slink = slink, link_sigma = link_sigma, link_hu = link_hu) } #' @rdname brmsfamily #' @export zero_inflated_beta <- function(link = "logit", link_phi = "log", link_zi = "logit") { slink <- substitute(link) .brmsfamily("zero_inflated_beta", link = link, slink = slink, link_phi = link_phi, link_zi = link_zi) } #' @rdname brmsfamily #' @export zero_one_inflated_beta <- function(link = "logit", link_phi = "log", link_zoi = "logit", link_coi = "logit") { slink <- substitute(link) .brmsfamily("zero_one_inflated_beta", link = link, slink = slink, link_phi = link_phi, link_zoi = link_zoi, link_coi = link_coi) } #' @rdname brmsfamily #' @export zero_inflated_poisson <- function(link = "log", link_zi = "logit") { slink <- substitute(link) .brmsfamily("zero_inflated_poisson", link = link, slink = slink, link_zi = link_zi) } #' @rdname brmsfamily #' @export zero_inflated_negbinomial <- function(link = "log", link_shape = "log", link_zi = "logit") { slink <- substitute(link) .brmsfamily("zero_inflated_negbinomial", link = link, slink = slink, link_shape = link_shape, link_zi = link_zi) } #' @rdname brmsfamily #' @export zero_inflated_binomial <- function(link = "logit", link_zi = "logit") { slink <- substitute(link) .brmsfamily("zero_inflated_binomial", link = link, slink = slink, link_zi = link_zi) } #' @rdname brmsfamily #' @export categorical <- function(link = "logit", refcat = NULL) { slink <- substitute(link) .brmsfamily("categorical", link = link, slink = slink, refcat = refcat) } #' @rdname brmsfamily #' @export multinomial <- function(link = "logit", refcat = NULL) { slink <- substitute(link) .brmsfamily("multinomial", link = link, slink = slink, refcat = refcat) } #' @rdname brmsfamily #' @export cumulative <- function(link = "logit", link_disc = "log", threshold = "flexible") { slink <- substitute(link) .brmsfamily("cumulative", link = link, slink = slink, link_disc = link_disc, threshold = threshold) } #' @rdname brmsfamily #' @export sratio <- function(link = "logit", link_disc = "log", threshold = "flexible") { slink <- substitute(link) .brmsfamily("sratio", link = link, slink = slink, link_disc = link_disc, threshold = threshold) } #' @rdname brmsfamily #' @export cratio <- 
function(link = "logit", link_disc = "log", threshold = "flexible") { slink <- substitute(link) .brmsfamily("cratio", link = link, slink = slink, link_disc = link_disc, threshold = threshold) } #' @rdname brmsfamily #' @export acat <- function(link = "logit", link_disc = "log", threshold = "flexible") { slink <- substitute(link) .brmsfamily("acat", link = link, slink = slink, link_disc = link_disc, threshold = threshold) } #' Finite Mixture Families in \pkg{brms} #' #' Set up a finite mixture family for use in \pkg{brms}. #' #' @param ... One or more objects providing a description of the #' response distributions to be combined in the mixture model. #' These can be family functions, calls to family functions or #' character strings naming the families. For details of supported #' families see \code{\link{brmsfamily}}. #' @param flist Optional list of objects, which are treated in the #' same way as objects passed via the \code{...} argument. #' @param nmix Optional numeric vector specifying the number of times #' each family is repeated. If specified, it must have the same length #' as the number of families passed via \code{...} and \code{flist}. #' @param order Ordering constraint to identify mixture components. #' If \code{'mu'} or \code{TRUE}, population-level intercepts #' of the mean parameters are ordered in non-ordinal models #' and fixed to the same value in ordinal models (see details). #' If \code{'none'} or \code{FALSE}, no ordering constraint is applied. #' If \code{NULL} (the default), \code{order} is set to \code{'mu'} #' if all families are the same and \code{'none'} otherwise. #' Other ordering constraints may be implemented in the future. #' #' @return An object of class \code{mixfamily}. #' #' @details #' #' Most families supported by \pkg{brms} can be used to form mixtures. The #' response variable has to be valid for all components of the mixture family. #' Currently, the number of mixture components has to be specified by the user. #' It is not yet possible to estimate the number of mixture components from the #' data. #' #' Ordering intercepts in mixtures of ordinal families is not possible as each #' family has itself a set of vector of intercepts (i.e. ordinal thresholds). #' Instead, \pkg{brms} will fix the vector of intercepts across components in #' ordinal mixtures, if desired, so that users can try to identify the mixture #' model via selective inclusion of predictors. #' #' For most mixture models, you may want to specify priors on the #' population-level intercepts via \code{\link{set_prior}} to improve #' convergence. In addition, it is sometimes necessary to set \code{inits = 0} #' in the call to \code{\link{brm}} to allow chains to initialize properly. #' #' For more details on the specification of mixture #' models, see \code{\link{brmsformula}}. 
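#'
#' As a small sketch of the \code{nmix} argument (see also the examples below),
#' each family passed via \code{...} is repeated \code{nmix} times, so that
#' \preformatted{
#' mix1 <- mixture(gaussian, gaussian)
#' mix2 <- mixture(gaussian, nmix = 2)
#' }
#' both define a two-component gaussian mixture.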
#' #' @examples #' \dontrun{ #' ## simulate some data #' set.seed(1234) #' dat <- data.frame( #' y = c(rnorm(200), rnorm(100, 6)), #' x = rnorm(300), #' z = sample(0:1, 300, TRUE) #' ) #' #' ## fit a simple normal mixture model #' mix <- mixture(gaussian, gaussian) #' prior <- c( #' prior(normal(0, 7), Intercept, dpar = mu1), #' prior(normal(5, 7), Intercept, dpar = mu2) #' ) #' fit1 <- brm(bf(y ~ x + z), dat, family = mix, #' prior = prior, chains = 2) #' summary(fit1) #' pp_check(fit1) #' #' ## use different predictors for the components #' fit2 <- brm(bf(y ~ 1, mu1 ~ x, mu2 ~ z), dat, family = mix, #' prior = prior, chains = 2) #' summary(fit2) #' #' ## fix the mixing proportions #' fit3 <- brm(bf(y ~ x + z, theta1 = 1, theta2 = 2), #' dat, family = mix, prior = prior, #' inits = 0, chains = 2) #' summary(fit3) #' pp_check(fit3) #' #' ## predict the mixing proportions #' fit4 <- brm(bf(y ~ x + z, theta2 ~ x), #' dat, family = mix, prior = prior, #' inits = 0, chains = 2) #' summary(fit4) #' pp_check(fit4) #' #' ## compare model fit #' LOO(fit1, fit2, fit3, fit4) #' } #' #' @export mixture <- function(..., flist = NULL, nmix = 1, order = NULL) { dots <- c(list(...), flist) if (length(nmix) == 1L) { nmix <- rep(nmix, length(dots)) } if (length(dots) != length(nmix)) { stop2("The length of 'nmix' should be the same ", "as the number of mixture components.") } dots <- dots[rep(seq_along(dots), nmix)] family <- list( family = "mixture", link = "identity", mix = lapply(dots, validate_family) ) class(family) <- c("mixfamily", "brmsfamily", "family") # validity checks if (length(family$mix) < 2L) { stop2("Expecting at least 2 mixture components.") } if (use_real(family) && use_int(family)) { stop2("Cannot mix families with real and integer support.") } is_ordinal <- ulapply(family$mix, is_ordinal) if (any(is_ordinal) && any(!is_ordinal)) { stop2("Cannot mix ordinal and non-ordinal families.") } no_mixture <- ulapply(family$mix, no_mixture) if (any(no_mixture)) { stop2("Some of the families are not allowed in mixture models.") } for (fam in family$mix) { if (is.customfamily(fam) && "theta" %in% fam$dpars) { stop2("Parameter name 'theta' is reserved in mixture models.") } } if (is.null(order)) { if (any(is_ordinal)) { family$order <- "none" message("Setting order = 'none' for mixtures of ordinal families.") } else if (length(unique(family_names(family))) == 1L) { family$order <- "mu" message("Setting order = 'mu' for mixtures of the same family.") } else { family$order <- "none" message("Setting order = 'none' for mixtures of different families.") } } else { if (length(order) != 1L) { stop2("Argument 'order' must be of length 1.") } if (is.character(order)) { valid_order <- c("none", "mu") if (!order %in% valid_order) { stop2("Argument 'order' is invalid. Valid options are: ", collapse_comma(valid_order)) } family$order <- order } else { family$order <- ifelse(as.logical(order), "mu", "none") } } family } #' Custom Families in \pkg{brms} Models #' #' Define custom families (i.e. response distribution) for use in #' \pkg{brms} models. It allows users to benefit from the modeling #' flexibility of \pkg{brms}, while applying their self-defined likelihood #' functions. All of the post-processing methods for \code{brmsfit} #' objects can be made compatible with custom families. #' See \code{vignette("brms_customfamilies")} for more details. #' For a list of built-in families see \code{\link{brmsfamily}}. #' #' @aliases customfamily #' #' @param name Name of the custom family. 
#' @param dpars Names of the distributional parameters of #' the family. One parameter must be named \code{"mu"} and #' the main formula of the model will correspond to that #' parameter. #' @param links Names of the link functions of the #' distributional parameters. #' @param type Indicates if the response distribution is #' continuous (\code{"real"}) or discrete (\code{"int"}). This controls #' if the corresponding density function will be named with #' \code{_lpdf} or \code{_lpmf}. #' @param lb Vector of lower bounds of the distributional #' parameters. Defaults to \code{NA} that is no lower bound. #' @param ub Vector of upper bounds of the distributional #' parameters. Defaults to \code{NA} that is no upper bound. #' @param vars Names of variables that are part of the likelihood function #' without being distributional parameters. That is, \code{vars} can be used #' to pass data to the likelihood. Such arguments will be added to the list of #' function arguments at the end, after the distributional parameters. See #' \code{\link{stanvar}} for details about adding self-defined data to the #' generated \pkg{Stan} model. Addition arguments \code{vreal} and \code{vint} #' may be used for this purpose as well (see Examples below). See also #' \code{\link{brmsformula}} and \code{\link{addition-terms}} for more #' details. #' @param loop Logical; Should the likelihood be evaluated via a loop #' (\code{TRUE}; the default) over observations in Stan? #' If \code{FALSE}, the Stan code will be written in a vectorized #' manner over observations if possible. #' @param specials A character vector of special options to enable #' for this custom family. Currently for internal use only. #' @param threshold Optional threshold type for custom ordinal families. #' Ignored for non-ordinal families. #' @param log_lik Optional function to compute log-likelihood values of #' the model in \R. This is only relevant if one wants to ensure #' compatibility with method \code{\link[brms:log_lik.brmsfit]{log_lik}}. #' @param posterior_predict Optional function to compute posterior prediction of #' the model in \R. This is only relevant if one wants to ensure compatibility #' with method \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}. #' @param posterior_epred Optional function to compute expected values of the #' posterior predictive distribution of the model in \R. This is only relevant #' if one wants to ensure compatibility with method #' \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}. #' @param predict Deprecated alias of `posterior_predict`. #' @param fitted Deprecated alias of `posterior_epred`. #' @param env An \code{\link{environment}} in which certain post-processing #' functions related to the custom family can be found, if there were not #' directly passed to \code{custom_family}. This is only #' relevant if one wants to ensure compatibility with the methods #' \code{\link[brms:log_lik.brmsfit]{log_lik}}, #' \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}}, or #' \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}. #' By default, \code{env} is the environment from which #' \code{custom_family} is called. #' #' @details The corresponding probability density or mass \code{Stan} #' functions need to have the same name as the custom family. 
#' That is if a family is called \code{myfamily}, then the #' \pkg{Stan} functions should be called \code{myfamily_lpdf} or #' \code{myfamily_lpmf} depending on whether it defines a #' continuous or discrete distribution. #' #' @return An object of class \code{customfamily} inheriting #' from class \code{\link{brmsfamily}}. #' #' @seealso \code{\link{brmsfamily}}, \code{\link{brmsformula}}, #' \code{\link{stanvar}} #' #' @examples #' \dontrun{ #' ## demonstrate how to fit a beta-binomial model #' ## generate some fake data #' phi <- 0.7 #' n <- 300 #' z <- rnorm(n, sd = 0.2) #' ntrials <- sample(1:10, n, replace = TRUE) #' eta <- 1 + z #' mu <- exp(eta) / (1 + exp(eta)) #' a <- mu * phi #' b <- (1 - mu) * phi #' p <- rbeta(n, a, b) #' y <- rbinom(n, ntrials, p) #' dat <- data.frame(y, z, ntrials) #' #' # define a custom family #' beta_binomial2 <- custom_family( #' "beta_binomial2", dpars = c("mu", "phi"), #' links = c("logit", "log"), lb = c(NA, 0), #' type = "int", vars = "vint1[n]" #' ) #' #' # define the corresponding Stan density function #' stan_density <- " #' real beta_binomial2_lpmf(int y, real mu, real phi, int N) { #' return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); #' } #' " #' stanvars <- stanvar(scode = stan_density, block = "functions") #' #' # fit the model #' fit <- brm(y | vint(ntrials) ~ z, data = dat, #' family = beta_binomial2, stanvars = stanvars) #' summary(fit) #' #' #' # define a *vectorized* custom family (no loop over observations) #' # notice also that 'vint' no longer has an observation index #' beta_binomial2_vec <- custom_family( #' "beta_binomial2", dpars = c("mu", "phi"), #' links = c("logit", "log"), lb = c(NA, 0), #' type = "int", vars = "vint1", loop = FALSE #' ) #' #' # define the corresponding Stan density function #' stan_density_vec <- " #' real beta_binomial2_lpmf(int[] y, vector mu, real phi, int[] N) { #' return beta_binomial_lpmf(y | N, mu * phi, (1 - mu) * phi); #' } #' " #' stanvars_vec <- stanvar(scode = stan_density_vec, block = "functions") #' #' # fit the model #' fit_vec <- brm(y | vint(ntrials) ~ z, data = dat, #' family = beta_binomial2_vec, #' stanvars = stanvars_vec) #' summary(fit_vec) #' } #' #' @export custom_family <- function(name, dpars = "mu", links = "identity", type = c("real", "int"), lb = NA, ub = NA, vars = NULL, loop = TRUE, specials = NULL, threshold = "flexible", log_lik = NULL, posterior_predict = NULL, posterior_epred = NULL, predict = NULL, fitted = NULL, env = parent.frame()) { name <- as_one_character(name) dpars <- as.character(dpars) links <- as.character(links) type <- match.arg(type) lb <- as.character(lb) ub <- as.character(ub) vars <- as.character(vars) loop <- as_one_logical(loop) specials <- as.character(specials) env <- as.environment(env) posterior_predict <- use_alias(posterior_predict, predict) posterior_epred <- use_alias(posterior_epred, fitted) if (any(duplicated(dpars))) { stop2("Duplicated 'dpars' are not allowed.") } if (!"mu" %in% dpars) { stop2("All families must have a 'mu' parameter.") } if (any(grepl("_|\\.", dpars))) { stop2("Dots or underscores are not allowed in 'dpars'.") } if (any(grepl("[[:digit:]]+$", dpars))) { stop2("'dpars' should not end with a number.") } for (arg in c("links", "lb", "ub")) { obj <- get(arg) if (length(obj) == 1L) { obj <- rep(obj, length(dpars)) assign(arg, obj) } if (length(dpars) != length(obj)) { stop2("'", arg, "' must be of the same length as 'dpars'.") } } if (!is.null(log_lik)) { log_lik <- as.function(log_lik) args <- names(formals(log_lik)) if 
(!is_equal(args[1:2], c("i", "draws"))) { stop2("The first two arguments of 'log_lik' ", "should be 'i' and 'draws'.") } } if (!is.null(posterior_predict)) { posterior_predict <- as.function(posterior_predict) args <- names(formals(posterior_predict)) if (!is_equal(args[1:3], c("i", "draws", "..."))) { stop2("The first three arguments of 'posterior_predict' ", "should be 'i', 'draws', and '...'.") } } if (!is.null(posterior_epred)) { posterior_epred <- as.function(posterior_epred) args <- names(formals(posterior_epred)) if (!is_equal(args[1], "draws")) { stop2("The first argument of 'posterior_epred' should be 'draws'.") } } lb <- named_list(dpars, lb) ub <- named_list(dpars, ub) is_mu <- "mu" == dpars link <- links[is_mu] normalized <- "" out <- nlist( family = "custom", link, name, dpars, lb, ub, type, vars, loop, specials, log_lik, posterior_predict, posterior_epred, env, normalized ) if (length(dpars) > 1L) { out[paste0("link_", dpars[!is_mu])] <- links[!is_mu] } class(out) <- c("customfamily", "brmsfamily", "family") if (is_ordinal(out)) { threshold <- match.arg(threshold) out$threshold <- threshold } out } # get post-processing methods for custom families custom_family_method <- function(family, name) { if (!is.customfamily(family)) { return(NULL) } out <- family[[name]] if (!is.function(out)) { out <- paste0(name, "_", family$name) out <- get(out, family$env) } out } # get valid distributional parameters for a family valid_dpars <- function(family, ...) { UseMethod("valid_dpars") } #' @export valid_dpars.default <- function(family, ...) { if (!length(family)) { return("mu") } family <- validate_family(family) family_info(family, "dpars", ...) } #' @export valid_dpars.mixfamily <- function(family, ...) { out <- lapply(family$mix, valid_dpars, ...) for (i in seq_along(out)) { out[[i]] <- paste0(out[[i]], i) } c(unlist(out), paste0("theta", seq_along(out))) } #' @export valid_dpars.brmsformula <- function(family, ...) { valid_dpars(family$family, ...) } #' @export valid_dpars.mvbrmsformula <- function(family, ...) { ulapply(family$forms, valid_dpars, ...) } #' @export valid_dpars.brmsterms <- function(family, ...) { valid_dpars(family$family, ...) } #' @export valid_dpars.mvbrmsterms <- function(family, ...) { ulapply(family$terms, valid_dpars, ...) } #' @export valid_dpars.brmsfit <- function(family, ...) { valid_dpars(family$formula, ...) 
} # class of a distributional parameter dpar_class <- function(dpar, family = NULL) { out <- sub("[[:digit:]]*$", "", dpar) if (!is.null(family)) { # TODO: avoid this special case by changing naming conventions if (conv_cats_dpars(family) && grepl("^mu", out)) { # categorical-like models have non-integer suffixes # that will not be caught by the standard procedure out <- "mu" } } out } # id of a distributional parameter dpar_id <- function(dpar) { out <- get_matches("[[:digit:]]+$", dpar, simplify = FALSE) ulapply(out, function(x) ifelse(length(x), x, "")) } # link functions for distributional parameters links_dpars <- function(dpar) { if (!length(dpar)) dpar <- "" switch(dpar, character(0), mu = "identity", # not actually used sigma = c("log", "identity", "softplus", "squareplus"), shape = c("log", "identity", "softplus", "squareplus"), nu = c("logm1", "identity"), phi = c("log", "identity", "softplus", "squareplus"), kappa = c("log", "identity", "softplus", "squareplus"), beta = c("log", "identity", "softplus", "squareplus"), zi = c("logit", "identity"), hu = c("logit", "identity"), zoi = c("logit", "identity"), coi = c("logit", "identity"), disc = c("log", "identity", "softplus", "squareplus"), bs = c("log", "identity", "softplus", "squareplus"), ndt = c("log", "identity", "softplus", "squareplus"), bias = c("logit", "identity"), quantile = c("logit", "identity"), xi = c("log1p", "identity"), alpha = c("identity", "log", "softplus", "squareplus"), theta = c("identity") ) } # generate a family object of a distributional parameter dpar_family <- function(family, dpar, ...) { UseMethod("dpar_family") } #' @export dpar_family.default <- function(family, dpar, ...) { dp_class <- dpar_class(dpar, family) if (dp_class == "mu") { if (conv_cats_dpars(family)) { link <- NULL if (!has_joint_link(family)) { link <- family$link } # joint links are applied directly in the likelihood function # so link is treated as 'identity' out <- .dpar_family(dpar, link) } else { # standard single mu parameters just store the original family out <- family } } else { # link_ is always defined for non-mu parameters link <- family[[paste0("link_", dp_class)]] out <- .dpar_family(dpar, link) } out } #' @export dpar_family.mixfamily <- function(family, dpar, ...) { dp_id <- as.numeric(dpar_id(dpar)) if (!(length(dp_id) == 1L && is.numeric(dp_id))) { stop2("Parameter '", dpar, "' is not a valid mixture parameter.") } out <- dpar_family(family$mix[[dp_id]], dpar, ...) out$order <- family$order out } # set up special family objects for distributional parameters # @param dpar name of the distributional parameter # @param link optional link function of the parameter .dpar_family <- function(dpar = NULL, link = NULL) { links <- links_dpars(dpar_class(dpar)) if (!length(link)) { if (!length(links)) { link <- "identity" } else { link <- links[1] } } link <- as_one_character(link) structure( nlist(family = "", link, dpar), class = c("brmsfamily", "family") ) } #' @export print.brmsfamily <- function(x, links = FALSE, newline = TRUE, ...) 
{ cat("\nFamily:", x$family, "\n") cat("Link function:", x$link, "\n") if (!is.null(x$threshold)) { cat("Threshold:", x$threshold, "\n") } if (isTRUE(links) || is.character(links)) { dp_links <- x[grepl("^link_", names(x))] names(dp_links) <- sub("^link_", "", names(dp_links)) if (is.character(links)) { dp_links <- rmNULL(dp_links[links]) } for (dp in names(dp_links)) { cat(paste0( "Link function of '", dp, "' (if predicted): ", dp_links[[dp]], "\n" )) } } if (newline) { cat("\n") } invisible(x) } #' @export print.mixfamily <- function(x, newline = TRUE, ...) { cat("\nMixture\n") for (i in seq_along(x$mix)) { print(x$mix[[i]], newline = FALSE, ...) } if (newline) { cat("\n") } invisible(x) } #' @export print.customfamily <- function(x, links = FALSE, newline = TRUE, ...) { cat("\nCustom family:", x$name, "\n") cat("Link function:", x$link, "\n") cat("Parameters:", paste0(x$dpars, collapse = ", "), "\n") if (isTRUE(links) || is.character(links)) { dp_links <- x[grepl("^link_", names(x))] names(dp_links) <- sub("^link_", "", names(dp_links)) if (is.character(links)) { dp_links <- rmNULL(dp_links[links]) } for (dp in names(dp_links)) { cat(paste0( "Link function of '", dp, "' (if predicted): ", dp_links[[dp]], "\n" )) } } if (newline) { cat("\n") } invisible(x) } #' @method summary family #' @export summary.family <- function(object, link = TRUE, ...) { out <- object$family if (link) { out <- paste0(out, "(", object$link, ")") } out } #' @method summary mixfamily #' @export summary.mixfamily <- function(object, link = FALSE, ...) { families <- ulapply(object$mix, summary, link = link, ...) paste0("mixture(", paste0(families, collapse = ", "), ")") } #' @method summary customfamily #' @export summary.customfamily <- function(object, link = TRUE, ...) { object$family <- object$name summary.family(object, link = link, ...) } summarise_families <- function(x) { # summary of families used in summary.brmsfit UseMethod("summarise_families") } #' @export summarise_families.mvbrmsformula <- function(x, ...) { out <- ulapply(x$forms, summarise_families, ...) paste0("MV(", paste0(out, collapse = ", "), ")") } #' @export summarise_families.brmsformula <- function(x, ...) { summary(x$family, link = FALSE, ...) } summarise_links <- function(x, ...) { # summary of link functions used in summary.brmsfit UseMethod("summarise_links") } #' @export summarise_links.mvbrmsformula <- function(x, wsp = 0, ...) { str_wsp <- collapse(rep(" ", wsp)) links <- ulapply(x$forms, summarise_links, mv = TRUE, ...) paste0(links, collapse = paste0("\n", str_wsp)) } #' @export summarise_links.brmsformula <- function(x, mv = FALSE, ...) 
{ x <- brmsterms(x) dpars <- valid_dpars(x) links <- setNames(rep("identity", length(dpars)), dpars) links_pred <- ulapply(x$dpars, function(x) x$family$link) links[names(links_pred)] <- links_pred if (conv_cats_dpars(x)) { links[grepl("^mu", names(links))] <- x$family$link } resp <- if (mv) usc(combine_prefix(x)) names(links) <- paste0(names(links), resp) paste0(names(links), " = ", links, collapse = "; ") } is.family <- function(x) { inherits(x, "family") } is.brmsfamily <- function(x) { inherits(x, "brmsfamily") } is.mixfamily <- function(x) { inherits(x, "mixfamily") } is.customfamily <- function(x) { inherits(x, "customfamily") } family_names <- function(x) { family_info(x, "family") } # indicate if family uses real responses use_real <- function(family) { "real" %in% family_info(family, "type") } # indicate if family uses integer responses use_int <- function(family) { "int" %in% family_info(family, "type") } is_binary <- function(family) { "binary" %in% family_info(family, "specials") } is_categorical <- function(family) { "categorical" %in% family_info(family, "specials") } is_ordinal <- function(family) { "ordinal" %in% family_info(family, "specials") } is_multinomial <- function(family) { "multinomial" %in% family_info(family, "specials") } is_dirichlet <- function(family) { "dirichlet" %in% family_info(family, "specials") } is_polytomous <- function(family) { is_categorical(family) || is_ordinal(family) || is_multinomial(family) || is_dirichlet(family) } is_cox <- function(family) { "cox" %in% family_info(family, "specials") } # has joint link function over multiple inputs has_joint_link <- function(family) { "joint_link" %in% family_info(family, "specials") } allow_factors <- function(family) { specials <- c("binary", "categorical", "ordinal") any(specials %in% family_info(family, "specials")) } # check if the family has natural residuals has_natural_residuals <- function(family) { "residuals" %in% family_info(family, "specials") } # check if the family allows for residual correlations has_rescor <- function(family) { "rescor" %in% family_info(family, "specials") } # check if category specific effects are allowed allow_cs <- function(family) { any(c("cs", "ocs") %in% family_info(family, "specials")) } # check if category specific effects should be ordered needs_ordered_cs <- function(family) { "ocs" %in% family_info(family, "specials") } # choose dpar names based on categories? 
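# Editor's illustrative sketch: '.illustrate_family_predicates' is a
# hypothetical, unexported helper (not used by brms) showing that the predicate
# helpers above are driven by the 'specials' entry returned by family_info().
.illustrate_family_predicates <- function() {
  c(
    is_ordinal(cumulative()),  # TRUE: 'ordinal' is among the family's specials
    is_ordinal(student())      # FALSE: the student family has no 'ordinal' special
  )
}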
conv_cats_dpars <- function(family) { is_categorical(family) || is_multinomial(family) || is_dirichlet(family) } # check if mixtures of the given families are allowed no_mixture <- function(family) { is_categorical(family) || is_multinomial(family) || is_dirichlet(family) } # indicate if the response should consist of multiple columns has_multicol <- function(family) { is_multinomial(family) || is_dirichlet(family) } # indicate if the response is modeled on the log-scale # even if formally the link function is not 'log' has_logscale <- function(family) { "logscale" %in% family_info(family, "specials") } # indicate if family makes use of argument trials has_trials <- function(family) { "trials" %in% family_info(family, "ad") && !"custom" %in% family_names(family) } # indicate if family has more than two response categories has_cat <- function(family) { is_categorical(family) || is_multinomial(family) || is_dirichlet(family) } # indicate if family has thresholds has_thres <- function(family) { is_ordinal(family) } # indicate if family has equidistant thresholds has_equidistant_thres <- function(family) { "equidistant" %in% family_info(family, "threshold") } # indicate if family has sum-to-zero thresholds has_sum_to_zero_thres <- function(family) { "sum_to_zero" %in% family_info(family, "threshold") } # indicate if family has ordered thresholds has_ordered_thres <- function(family) { "ordered_thres" %in% family_info(family, "specials") } # compute threshold - eta in the likelihood has_thres_minus_eta <- function(family) { "thres_minus_eta" %in% family_info(family, "specials") } # compute eta - threshold in the likelihood has_eta_minus_thres <- function(family) { "eta_minus_thres" %in% family_info(family, "specials") } # get names of response categories # @param group name of a group for which to extract categories get_cats <- function(family) { family_info(family, "cats") } # get names of ordinal thresholds for prior specification # @param group name of a group for which to extract categories get_thres <- function(family, group = "") { group <- as_one_character(group) thres <- family_info(family, "thres") subset2(thres, group = group)$thres } # get group names of ordinal thresholds get_thres_groups <- function(family) { thres <- family_info(family, "thres") unique(thres$group) } # has the model group specific thresholds? has_thres_groups <- function(family) { groups <- get_thres_groups(family) any(nzchar(groups)) } has_ndt <- function(family) { "ndt" %in% dpar_class(family_info(family, "dpars")) } has_sigma <- function(family) { "sigma" %in% dpar_class(family_info(family, "dpars")) } # check if sigma should be explicitely set to 0 no_sigma <- function(bterms) { stopifnot(is.brmsterms(bterms)) if (is.formula(bterms$adforms$se)) { se <- eval_rhs(bterms$adforms$se) se_only <- isFALSE(se$flags$sigma) if (se_only && use_ac_cov_time(bterms)) { stop2("Please set argument 'sigma' of function 'se' ", "to TRUE when modeling time-series covariance matrices.") } } else { se_only <- FALSE } se_only } # has the model a non-predicted but estimated sigma parameter? simple_sigma <- function(bterms) { stopifnot(is.brmsterms(bterms)) has_sigma(bterms) && !no_sigma(bterms) && !pred_sigma(bterms) } # has the model a predicted sigma parameter? pred_sigma <- function(bterms) { stopifnot(is.brmsterms(bterms)) "sigma" %in% dpar_class(names(bterms$dpars)) } # do not include a 'nu' parameter in a univariate model? 
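# e.g. a multivariate model with family = student() and residual correlations
# (set_rescor(TRUE)) uses the joint multi_student_t likelihood, which carries
# a single shared 'nu', so the univariate parts must not define their own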
no_nu <- function(bterms) { # the multi_student_t family only has a single 'nu' parameter isTRUE(bterms$rescor) && "student" %in% family_names(bterms) } # does the family-link combination have a built-in Stan function? has_built_in_fun <- function(family, link = NULL, dpar = NULL, cdf = FALSE) { link <- link %||% family$link glm_special <- paste0("sbi", usc(dpar), "_", link, str_if(cdf, "_cdf")) all(glm_special %in% family_info(family, "specials")) } # suffixes of Stan lpdfs or lpmfs for which only a normalized version exists always_normalized <- function(family) { family_info(family, "normalized") } # prepare for calling family specific post-processing functions prepare_family <- function(x) { stopifnot(is.brmsformula(x) || is.brmsterms(x)) family <- x$family acef <- tidy_acef(x) if (use_ac_cov_time(acef) && has_natural_residuals(x)) { family$fun <- paste0(family$family, "_time") } else if (has_ac_class(acef, "sar")) { acef_sar <- subset2(acef, class = "sar") if (has_ac_subset(acef_sar, type = "lag")) { family$fun <- paste0(family$family, "_lagsar") } else if (has_ac_subset(acef_sar, type = "error")) { family$fun <- paste0(family$family, "_errorsar") } } else if (has_ac_class(acef, "fcor")) { family$fun <- paste0(family$family, "_fcor") } else { family$fun <- family$family } family } # order intercepts to help identifying mixture components? # does not work in ordinal models as they have vectors of intercepts order_intercepts <- function(bterms) { dpar <- dpar_class(bterms[["dpar"]]) if (!length(dpar)) dpar <- "mu" isTRUE(!is_ordinal(bterms) && dpar %in% bterms$family[["order"]]) } # fix intercepts to help identifying mixture components? # currently enabled only in ordinal models fix_intercepts <- function(bterms) { dpar <- dpar_class(bterms[["dpar"]]) if (!length(dpar)) dpar <- "mu" isTRUE(is_ordinal(bterms) && dpar %in% bterms$family[["order"]]) } # does the mixture have a joint parameter vector 'theta' has_joint_theta <- function(bterms) { stopifnot(is.brmsterms(bterms)) is.mixfamily(bterms$family) && !"theta" %in% dpar_class(names(c(bterms$dpars, bterms$fdpars))) } # extract family boundaries family_bounds <- function(x, ...) { UseMethod("family_bounds") } # @return a named list with one element per response variable #' @export family_bounds.mvbrmsterms <- function(x, ...) { lapply(x$terms, family_bounds, ...) } # bounds of likelihood families # @return a list with elements 'lb' and 'ub' #' @export family_bounds.brmsterms <- function(x, ...) 
{ family <- x$family$family if (is.null(family)) { return(list(lb = -Inf, ub = Inf)) } resp <- usc(x$resp) # TODO: define in family-lists.R pos_families <- c( "poisson", "negbinomial", "negbinomial2", "geometric", "gamma", "weibull", "exponential", "lognormal", "frechet", "inverse.gaussian", "hurdle_poisson", "hurdle_negbinomial", "hurdle_gamma", "hurdle_lognormal", "zero_inflated_poisson", "zero_inflated_negbinomial" ) beta_families <- c("beta", "zero_inflated_beta", "zero_one_inflated_beta") ordinal_families <- c("cumulative", "cratio", "sratio", "acat") if (family %in% pos_families) { out <- list(lb = 0, ub = Inf) } else if (family %in% c("bernoulli", beta_families)) { out <- list(lb = 0, ub = 1) } else if (family %in% c("categorical", ordinal_families)) { out <- list(lb = 1, ub = paste0("ncat", resp)) } else if (family %in% c("binomial", "zero_inflated_binomial")) { out <- list(lb = 0, ub = paste0("trials", resp)) } else if (family %in% "von_mises") { out <- list(lb = -pi, ub = pi) } else if (family %in% c("wiener", "shifted_lognormal")) { out <- list(lb = paste("min_Y", resp), ub = Inf) } else { out <- list(lb = -Inf, ub = Inf) } out } brms/R/brm_multiple.R0000644000175000017500000002150514126515671014401 0ustar nileshnilesh#' Run the same \pkg{brms} model on multiple datasets #' #' Run the same \pkg{brms} model on multiple datasets and then combine the #' results into one fitted model object. This is useful in particular for #' multiple missing value imputation, where the same model is fitted on multiple #' imputed data sets. Models can be run in parallel using the \pkg{future} #' package. #' #' @inheritParams brm #' @param data A \emph{list} of data.frames each of which will be used to fit a #' separate model. Alternatively, a \code{mids} object from the \pkg{mice} #' package. #' @param data2 A \emph{list} of named lists each of which will be used to fit a #' separate model. Each of the named lists contains objects representing data #' which cannot be passed via argument \code{data} (see \code{\link{brm}} for #' examples). The length of the outer list should match the length of the list #' passed to the \code{data} argument. #' @param recompile Logical, indicating whether the Stan model should be #' recompiled for every imputed data set. Defaults to \code{FALSE}. If #' \code{NULL}, \code{brm_multiple} tries to figure out internally, if recompilation #' is necessary, for example because data-dependent priors have changed. #' Using the default of no recompilation should be fine in most cases. #' @param combine Logical; Indicates if the fitted models should be combined #' into a single fitted model object via \code{\link{combine_models}}. #' Defaults to \code{TRUE}. #' @param fit An instance of S3 class \code{brmsfit_multiple} derived from a #' previous fit; defaults to \code{NA}. If \code{fit} is of class #' \code{brmsfit_multiple}, the compiled model associated with the fitted #' result is re-used and all arguments modifying the model code or data are #' ignored. It is not recommended to use this argument directly, but to call #' the \code{\link[brms:update.brmsfit_multiple]{update}} method, instead. #' @param ... Further arguments passed to \code{\link{brm}}. #' #' @details The combined model may issue false positive convergence warnings, as #' the MCMC chains corresponding to different datasets may not necessarily #' overlap, even if each of the original models did converge. 
To find out #' whether each of the original models converged, investigate #' \code{fit$rhats}, where \code{fit} denotes the output of #' \code{brm_multiple}. #' #' @return If \code{combine = TRUE} a \code{brmsfit_multiple} object, which #' inherits from class \code{brmsfit} and behaves essentially the same. If #' \code{combine = FALSE} a list of \code{brmsfit} objects. #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @examples #' \dontrun{ #' library(mice) #' imp <- mice(nhanes2) #' #' # fit the model using mice and lm #' fit_imp1 <- with(lm(bmi ~ age + hyp + chl), data = imp) #' summary(pool(fit_imp1)) #' #' # fit the model using brms #' fit_imp2 <- brm_multiple(bmi ~ age + hyp + chl, data = imp, chains = 1) #' summary(fit_imp2) #' plot(fit_imp2, pars = "^b_") #' # investigate convergence of the original models #' fit_imp2$rhats #' #' # use the future package for parallelization #' library(future) #' plan(multiprocess) #' fit_imp3 <- brm_multiple(bmi~age+hyp+chl, data = imp, chains = 1) #' summary(fit_imp3) #' } #' #' @export brm_multiple <- function(formula, data, family = gaussian(), prior = NULL, data2 = NULL, autocor = NULL, cov_ranef = NULL, sample_prior = c("no", "yes", "only"), sparse = NULL, knots = NULL, stanvars = NULL, stan_funs = NULL, silent = 1, recompile = FALSE, combine = TRUE, fit = NA, seed = NA, file = NULL, file_refit = "never", ...) { combine <- as_one_logical(combine) file_refit <- match.arg(file_refit, file_refit_options()) if (!is.null(file)) { if (file_refit == "on_change") { stop2("file_refit = 'on_change' is not supported for brm_multiple yet.") } # optionally load saved model object if (!combine) { stop2("Cannot use 'file' if 'combine' is FALSE.") } if (file_refit != "always") { fits <- read_brmsfit(file) if (!is.null(fits)) { return(fits) } } } silent <- validate_silent(silent) recompile <- as_one_logical(recompile) data_name <- substitute_name(data) if (inherits(data, "mids")) { require_package("mice", version = "3.0.0") data <- lapply(seq_len(data$m), mice::complete, data = data) } else if (!is_data_list(data)) { stop2("'data' must be a list of data.frames.") } if (!is.null(data2)) { if (!is_data2_list(data2)) { stop2("'data2' must be a list of named lists.") } if (length(data2) != length(data)) { stop2("'data2' must have the same length as 'data'.") } } if (is.brmsfit(fit)) { # avoid complications when updating the model class(fit) <- setdiff(class(fit), "brmsfit_multiple") } else { args <- nlist( formula, data = data[[1]], family, prior, data2 = data2[[1]], autocor, cov_ranef, sample_prior, sparse, knots, stanvars, stan_funs, silent, seed, ... ) args$chains <- 0 if (silent < 2) { message("Compiling the C++ model") } fit <- suppressMessages(do_call(brm, args)) } dots <- list(...) 
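# 'fit' now holds the compiled model without any posterior draws; below it is
# either returned as is (if the user requested no sampling) or updated once
# per imputed dataset, so that the Stan model is not recompiled for every
# dataset (unless 'recompile' is TRUE)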
# allow compiling the model without sampling (#671) if (isTRUE(dots$chains == 0) || isTRUE(dots$iter == 0)) { class(fit) <- c("brmsfit_multiple", class(fit)) return(fit) } fits <- futures <- rhats <- vector("list", length(data)) for (i in seq_along(data)) { futures[[i]] <- future::future( update(fit, newdata = data[[i]], data2 = data2[[i]], recompile = recompile, silent = silent, ...), packages = "brms", seed = TRUE ) } for (i in seq_along(data)) { if (silent < 2) { message("Fitting imputed model ", i) } fits[[i]] <- future::value(futures[[i]]) rhats[[i]] <- data.frame(as.list(rhat(fits[[i]]))) if (any(rhats[[i]] > 1.1, na.rm = TRUE)) { warning2("Imputed model ", i, " did not converge.") } } if (combine) { fits <- combine_models(mlist = fits, check_data = FALSE) attr(fits$data, "data_name") <- data_name fits$rhats <- do_call(rbind, rhats) class(fits) <- c("brmsfit_multiple", class(fits)) } if (!is.null(file)) { fits <- write_brmsfit(fits, file) } fits } #' Combine Models fitted with \pkg{brms} #' #' Combine multiple \code{brmsfit} objects, which fitted the same model. #' This is usefully for instance when having manually run models in parallel. #' #' @param ... One or more \code{brmsfit} objects. #' @param mlist Optional list of one or more \code{brmsfit} objects. #' @param check_data Logical; indicates if the data should be checked #' for being the same across models (defaults to \code{TRUE}). #' Setting it to \code{FALSE} may be useful for instance #' when combining models fitted on multiple imputed data sets. #' #' @details This function just takes the first model and replaces #' its \code{stanfit} object (slot \code{fit}) by the combined #' \code{stanfit} objects of all models. #' #' @return A \code{brmsfit} object. #' #' @export combine_models <- function(..., mlist = NULL, check_data = TRUE) { models <- c(list(...), mlist) check_data <- as_one_logical(check_data) if (!length(models)) { stop2("No models supplied to 'combine_models'.") } for (i in seq_along(models)) { if (!is.brmsfit(models[[i]])) { stop2("Model ", i, " is no 'brmsfit' object.") } models[[i]] <- restructure(models[[i]]) } ref_formula <- formula(models[[1]]) ref_pars <- variables(models[[1]]) ref_mf <- model.frame(models[[1]]) for (i in seq_along(models)[-1]) { if (!is_equal(formula(models[[i]]), ref_formula)) { stop2("Models 1 and ", i, " have different formulas.") } if (!is_equal(variables(models[[i]]), ref_pars)) { stop2("Models 1 and ", i, " have different parameters.") } if (check_data && !is_equal(model.frame(models[[i]]), ref_mf)) { stop2( "Models 1 and ", i, " have different data. ", "Set 'check_data' to FALSE to turn off checking of the data." ) } } sflist <- lapply(models, "[[", "fit") models[[1]]$fit <- rstan::sflist2stanfit(sflist) models[[1]] } # validity check for 'data' input of 'brm_multiple' is_data_list <- function(x) { is.list(x) && is.vector(x) } # validity check for 'data2' input of 'brm_multiple' is_data2_list <- function(x) { is.list(x) && all(ulapply(x, function(y) is.list(y) && is_named(y))) } warn_brmsfit_multiple <- function(x, newdata = NULL) { if (is.brmsfit_multiple(x) && is.null(newdata)) { warning2( "Using only the first imputed data set. Please interpret the results ", "with caution until a more principled approach has been implemented." 
) } invisible(x) } brms/R/lsp.R0000644000175000017500000000372014010776133012475 0ustar nileshnilesh# find all namespace entries of a package, which are of # a particular type for instance all exported objects # retrieved from https://github.com/raredd/rawr # @param package the package name # @param what type of the objects to retrieve ("all" for all objects) # @param pattern regex that must be matches by the object names # @return a character vector of object names lsp <- function(package, what = "all", pattern = ".*") { if (!is.character(substitute(package))) package <- deparse(substitute(package)) ns <- asNamespace(package) ## base package does not have NAMESPACE if (isBaseNamespace(ns)) { res <- ls(.BaseNamespaceEnv, all.names = TRUE) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } else { ## for non base packages if (exists('.__NAMESPACE__.', envir = ns, inherits = FALSE)) { wh <- get('.__NAMESPACE__.', inherits = FALSE, envir = asNamespace(package, base.OK = FALSE)) what <- if (missing(what)) 'all' else if ('?' %in% what) return(ls(wh)) else ls(wh)[pmatch(what[1], ls(wh))] if (!is.null(what) && !any(what %in% c('all', ls(wh)))) stop('\'what\' should be one of ', paste0(shQuote(ls(wh)), collapse = ', '), ', or \'all\'', domain = NA) res <- sapply(ls(wh), function(x) getNamespaceInfo(ns, x)) res <- rapply(res, ls, classes = 'environment', how = 'replace', all.names = TRUE) if (is.null(what)) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) if (what %in% 'all') { res <- ls(getNamespace(package), all.names = TRUE) return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } if (any(what %in% ls(wh))) { res <- res[[what]] return(res[grep(pattern, res, perl = TRUE, ignore.case = TRUE)]) } } else stop(sprintf('no NAMESPACE file found for package %s', package)) } } brms/R/formula-sm.R0000644000175000017500000000621613701270367013770 0ustar nileshnilesh# This file contains functions dealing with the extended # formula syntax to specify smooth terms via mgcv #' Defining smooths in \pkg{brms} formulas #' #' Functions used in definition of smooth terms within a model formulas. #' The function does not evaluate a (spline) smooth - it exists purely #' to help set up a model using spline based smooths. #' #' @param ... Arguments passed to \code{\link[mgcv:s]{mgcv::s}} or #' \code{\link[mgcv:t2]{mgcv::t2}}. #' #' @details The function defined here are just simple wrappers #' of the respective functions of the \pkg{mgcv} package. #' #' @seealso \code{\link{brmsformula}}, #' \code{\link[mgcv:s]{mgcv::s}}, \code{\link[mgcv:t2]{mgcv::t2}} #' #' @examples #' \dontrun{ #' # simulate some data #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' #' # fit univariate smooths for all predictors #' fit1 <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), #' data = dat, chains = 2) #' summary(fit1) #' plot(conditional_smooths(fit1), ask = FALSE) #' #' # fit a more complicated smooth model #' fit2 <- brm(y ~ t2(x0, x1) + s(x2, by = x3), #' data = dat, chains = 2) #' summary(fit2) #' plot(conditional_smooths(fit2), ask = FALSE) #' } #' #' @export s <- function(...) { mgcv::s(...) } #' @rdname s #' @export t2 <- function(...) { mgcv::t2(...) 
} # extract information about smooth terms # @param x either a formula or a list containing an element "sm" # @param data data.frame containing the covariates tidy_smef <- function(x, data) { if (is.formula(x)) { x <- brmsterms(x, check_response = FALSE)$dpars$mu } form <- x[["sm"]] if (!is.formula(form)) { return(empty_data_frame()) } out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- nrow(out) out$sfun <- get_matches("^[^\\(]+", out$term) out$vars <- out$byvars <- out$covars <- vector("list", nterms) for (i in seq_len(nterms)) { sm <- eval2(out$term[i]) out$covars[[i]] <- sm$term if (sm$by != "NA") { out$byvars[[i]] <- sm$by } out$vars[[i]] <- c(out$covars[[i]], out$byvars[[i]]) } out$label <- paste0(out$sfun, rename(ulapply(out$vars, collapse))) # prepare information inferred from the data sdata <- data_sm(x, data) bylevels <- attr(sdata$Xs, "bylevels") nby <- lengths(bylevels) tmp <- vector("list", nterms) for (i in seq_len(nterms)) { tmp[[i]] <- out[i, , drop = FALSE] tmp[[i]]$termnum <- i if (nby[i] > 0L) { tmp[[i]] <- do_call(rbind, repl(tmp[[i]], nby[i])) tmp[[i]]$bylevel <- rm_wsp(bylevels[[i]]) tmp[[i]]$byterm <- paste0(tmp[[i]]$term, tmp[[i]]$bylevel) str_add(tmp[[i]]$label) <- rename(tmp[[i]]$bylevel) } else { tmp[[i]]$bylevel <- NA tmp[[i]]$byterm <- tmp[[i]]$term } } out <- do_call(rbind, tmp) out$knots <- sdata[grepl("^knots_", names(sdata))] out$nbases <- lengths(out$knots) attr(out, "Xs_names") <- colnames(sdata$Xs) rownames(out) <- NULL out } # check if smooths are present in the model has_smooths <- function(bterms) { length(get_effect(bterms, target = "sm")) > 0L } brms/R/misc.R0000644000175000017500000007574114116432047012646 0ustar nileshnilesh# type-stable indexing of vector and matrix type objects # @param x an R object typically a vector or matrix # @param i optional index; if NULL, x is returned unchanged # @param row indicating if rows or cols should be indexed # only relevant if x has two or three dimensions p <- function(x, i = NULL, row = TRUE) { # TODO: replace by "slice" if (isTRUE(length(dim(x)) > 3L)) { stop2("'p' can only handle objects up to 3 dimensions.") } if (!length(i)) { out <- x } else if (length(dim(x)) == 2L) { if (row) { out <- x[i, , drop = FALSE] } else { out <- x[, i, drop = FALSE] } } else if (length(dim(x)) == 3L) { if (row) { out <- x[i, , , drop = FALSE] } else { out <- x[, i, , drop = FALSE] } } else { out <- x[i] } out } # extract parts of an object with selective dropping of dimensions # @param x,...,drop same as in x[..., drop] # @drop_dim: Optional numeric or logical vector controlling # which dimensions to drop. Will overwrite argument 'drop'. extract <- function(x, ..., drop = FALSE, drop_dim = NULL) { if (!length(dim(x))) { return(x[...]) } if (length(drop_dim)) { drop <- FALSE } else { drop <- as_one_logical(drop) } out <- x[..., drop = drop] if (drop || !length(drop_dim) || any(dim(out) == 0L)) { return(out) } if (is.numeric(drop_dim)) { drop_dim <- seq_along(dim(x)) %in% drop_dim } if (!is.logical(drop_dim)) { stop2("'drop_dim' needs to be logical or numeric.") } keep <- dim(out) > 1L | !drop_dim new_dim <- dim(out)[keep] if (length(new_dim) == 1L) { # use vectors instead of 1D arrays new_dim <- NULL } dim(out) <- new_dim out } # extract slices of one array dimension without dropping other dimensions # @param x an array # @param dim dimension from which to take the slice # @param i slice index # @param drop Logical (length 1) indicating whether to drop dimension `dim`. 
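# @examples (illustrative)
# a <- array(1:24, dim = c(2, 3, 4))
# slice(a, dim = 2, i = 1)  # a 2 x 4 matrix; the second dimension is dropped
# slice_col(a, 1)           # equivalent shortcut for slicing the second dimension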
slice <- function(x, dim, i, drop = TRUE) { ndim <- length(dim(x)) commas1 <- collapse(rep(", ", dim - 1)) commas2 <- collapse(rep(", ", ndim - dim)) drop_dim <- ifelse(drop, ", drop_dim = dim", "") expr <- paste0("extract(x, ", commas1, "i", commas2, drop_dim, ")") eval2(expr) } # slice out columns without dropping other dimensions # @param x an array; a vector or 1D array is treated as already sliced # @param i column index slice_col <- function(x, i) { if (length(dim(x)) < 2L) { # a vector or 1D array is treated as already sliced return(x) } slice(x, 2, i) } seq_rows <- function(x) { seq_len(NROW(x)) } seq_cols <- function(x) { seq_len(NCOL(x)) } seq_dim <- function(x, dim) { dim <- as_one_numeric(dim) if (dim == 1) { len <- NROW(x) } else if (dim == 2) { len <- NCOL(x) } else { len <- dim(x)[dim] } if (length(len) == 1L && !isNA(len)) { out <- seq_len(len) } else { out <- integer(0) } out } # match rows in x with rows in y match_rows <- function(x, y, ...) { x <- as.data.frame(x) y <- as.data.frame(y) x <- do.call("paste", c(x, sep = "\r")) y <- do.call("paste", c(y, sep = "\r")) match(x, y, ...) } # find elements of 'x' matching sub-elements passed via 'ls' and '...' find_elements <- function(x, ..., ls = list(), fun = '%in%') { x <- as.list(x) if (!length(x)) { return(logical(0)) } out <- rep(TRUE, length(x)) ls <- c(ls, list(...)) if (!length(ls)) { return(out) } if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } for (name in names(ls)) { tmp <- lapply(x, "[[", name) out <- out & do_call(fun, list(tmp, ls[[name]])) } out } # find rows of 'x' matching columns passed via 'ls' and '...' # similar to 'find_elements' but for matrix like objects find_rows <- function(x, ..., ls = list(), fun = '%in%') { x <- as.data.frame(x) if (!nrow(x)) { return(logical(0)) } out <- rep(TRUE, nrow(x)) ls <- c(ls, list(...)) if (!length(ls)) { return(out) } if (is.null(names(ls))) { stop("Argument 'ls' must be named.") } for (name in names(ls)) { out <- out & do_call(fun, list(x[[name]], ls[[name]])) } out } # subset 'x' using arguments passed via 'ls' and '...' 
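# e.g. (illustrative) with df <- data.frame(class = c("a", "b", "a"), id = 1:3),
# subset2(df, class = "a") returns the first and third row of df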
subset2 <- function(x, ..., ls = list(), fun = '%in%') { x[find_rows(x, ..., ls = ls, fun = fun), , drop = FALSE] } # convert array to list of elements with reduced dimension # @param x an arrary of dimension d # @return a list of arrays of dimension d-1 array2list <- function(x) { if (is.null(dim(x))) { return(as.list(x)) } ndim <- length(dim(x)) out <- list(length = dim(x)[ndim]) ind <- collapse(rep(",", ndim - 1)) for (i in seq_len(dim(x)[ndim])) { out[[i]] <- eval2(paste0("x[", ind, i, "]")) if (length(dim(x)) > 2) { # avoid accidental dropping of other dimensions dim(out[[i]]) <- dim(x)[-ndim] } } names(out) <- dimnames(x)[[ndim]] out } # move elements to the start of a named object move2start <- function(x, first) { x[c(first, setdiff(names(x), first))] } # wrapper around replicate but without simplifying repl <- function(expr, n) { replicate(n, expr, simplify = FALSE) } # find the first element in A that is greater than target # @param A a matrix # @param target a vector of length nrow(A) # @param i column of A being checked first # @return a vector of the same length as target containing the # column ids where A[,i] was first greater than target first_greater <- function(A, target, i = 1) { ifelse(target <= A[, i] | ncol(A) == i, i, first_greater(A, target, i + 1)) } # check if an object is NULL isNULL <- function(x) { is.null(x) || ifelse(is.vector(x), all(sapply(x, is.null)), FALSE) } # recursively removes NULL entries from an object rmNULL <- function(x, recursive = TRUE) { x <- Filter(Negate(isNULL), x) if (recursive) { x <- lapply(x, function(x) if (is.list(x)) rmNULL(x) else x) } x } # find the first argument that is not NULL first_not_null <- function(...) { dots <- list(...) out <- NULL i <- 1L while (isNULL(out) && i <= length(dots)) { if (!isNULL(dots[[i]])) { out <- dots[[i]] } i <- i + 1L } out } isNA <- function(x) { length(x) == 1L && is.na(x) } is_equal <- function(x, y, check.attributes = FALSE, ...) 
{ isTRUE(all.equal(x, y, check.attributes = check.attributes, ...)) } # check if 'x' will behave like a factor in design matrices is_like_factor <- function(x) { is.factor(x) || is.character(x) || is.logical(x) } # as.factor but allows to pass levels as_factor <- function(x, levels = NULL) { if (is.null(levels)) { out <- as.factor(x) } else { out <- factor(x, levels = levels) } out } # coerce 'x' to a single logical value as_one_logical <- function(x, allow_na = FALSE) { s <- substitute(x) x <- as.logical(x) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse_combine(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single logical value.") } x } # coerce 'x' to a single integer value as_one_integer <- function(x, allow_na = FALSE) { s <- substitute(x) x <- SW(as.integer(x)) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse_combine(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single integer value.") } x } # coerce 'x' to a single numeric value as_one_numeric <- function(x, allow_na = FALSE) { s <- substitute(x) x <- SW(as.numeric(x)) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse_combine(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single numeric value.") } x } # coerce 'x' to a single character string as_one_character <- function(x, allow_na = FALSE) { s <- substitute(x) x <- as.character(x) if (length(x) != 1L || anyNA(x) && !allow_na) { s <- deparse_combine(s, max_char = 100L) stop2("Cannot coerce '", s, "' to a single character value.") } x } # coerce 'x' to a single character variable name as_one_variable <- function(x, allow_na = TRUE) { x <- as_one_character(x) if (x == "NA" && allow_na) { return(x) } if (!nzchar(x) || !is_equal(x, all_vars(x))) { stop2("Cannot coerce '", x, "' to a single variable name.") } x } has_rows <- function(x) { isTRUE(nrow(x) > 0L) } has_cols <- function(x) { isTRUE(ncol(x) > 0L) } # expand arguments to the same length # @param ... arguments to expand # @param length optional expansion length # otherwise taken to be the largest supplied length # @return a data.frame with one variable per element in '...' expand <- function(..., dots = list(), length = NULL) { dots <- c(dots, list(...)) max_dim <- NULL if (is.null(length)) { lengths <- lengths(dots) length <- max(lengths) max_dim <- dim(dots[[match(length, lengths)]]) } out <- as.data.frame(lapply(dots, rep, length.out = length)) structure(out, max_dim = max_dim) } # structure but ignore NULL structure_not_null <- function(.Data, ...) { if (!is.null(.Data)) { .Data <- structure(.Data, ...) } .Data } # remove specified attributes rm_attr <- function(x, attr) { attributes(x)[attr] <- NULL x } # unidimensional subsetting while keeping attributes subset_keep_attr <- function(x, y) { att <- attributes(x) x <- x[y] att$names <- names(x) attributes(x) <- att x } '%||%' <- function(x, y) { if (is.null(x)) x <- y x } # check if 'x' is a whole number (integer) is_wholenumber <- function(x, tol = .Machine$double.eps) { if (is.numeric(x)) { out <- abs(x - round(x)) < tol } else { out <- rep(FALSE, length(x)) } dim(out) <- dim(x) out } # helper function to check symmetry of a matrix is_symmetric <- function(x, tol = sqrt(.Machine$double.eps)) { isSymmetric(x, tol = tol, check.attributes = FALSE) } # unlist lapply output ulapply <- function(X, FUN, ..., recursive = TRUE, use.names = TRUE) { unlist(lapply(X, FUN, ...), recursive, use.names) } # rbind lapply output rblapply <- function(X, FUN, ...) 
{ do.call(rbind, lapply(X, FUN, ...)) } # cbind lapply output cblapply <- function(X, FUN, ...) { do.call(cbind, lapply(X, FUN, ...)) } # parallel lapply sensitive to the operating system plapply <- function(X, FUN, cores = 1, ...) { if (cores == 1) { out <- lapply(X, FUN, ...) } else { if (!os_is_windows()) { out <- parallel::mclapply(X = X, FUN = FUN, mc.cores = cores, ...) } else { cl <- parallel::makePSOCKcluster(cores) on.exit(parallel::stopCluster(cl)) out <- parallel::parLapply(cl = cl, X = X, fun = FUN, ...) } } out } # check if the operating system is Windows os_is_windows <- function() { isTRUE(Sys.info()[['sysname']] == "Windows") } # find variables in a character string or expression all_vars <- function(expr, ...) { if (is.character(expr)) { expr <- str2expression(expr) } all.vars(expr, ...) } # reimplemented for older R versions # see ?parse in R 3.6 or higher str2expression <- function(x) { parse(text = x, keep.source = FALSE) } # reimplemented for older R versions # see ?parse in R 3.6 or higher str2lang <- function(x) { str2expression(x)[[1]] } # append list(...) to x lc <- function(x, ...) { dots <- rmNULL(list(...), recursive = FALSE) c(x, dots) } 'c<-' <- function(x, value) { c(x, value) } 'lc<-' <- function(x, value) { lc(x, value) } collapse <- function(..., sep = "") { paste(..., sep = sep, collapse = "") } collapse_comma <- function(...) { paste0("'", ..., "'", collapse = ", ") } # add characters to an existing string 'str_add<-' <- function(x, start = FALSE, value) { if (start) paste0(value, x) else paste0(x, value) } # add list of characters to an existing list 'str_add_list<-' <- function(x, start = FALSE, value) { stopifnot(is.list(x), is.list(value)) out <- if (start) list(value, x) else list(x, value) collapse_lists(ls = out) } # type-stable if clause for strings with default else output str_if <- function(cond, yes, no = "") { cond <- as_one_logical(cond) if (cond) as.character(yes) else as.character(no) } # select elements which match a regex pattern str_subset <- function(x, pattern, ...) { x[grepl(pattern, x, ...)] } # similar to glue::glue but specialized for generating Stan code glue <- function(..., sep = "", collapse = NULL, envir = parent.frame(), open = "{", close = "}", na = "NA") { dots <- list(...) 
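# drop zero-length arguments before passing everything on to glue::glue_data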
dots <- dots[lengths(dots) > 0L] args <- list( .x = NULL, .sep = sep, .envir = envir, .open = open, .close = close, .na = na, .trim = FALSE, .transformer = zero_length_transformer ) out <- do.call(glue::glue_data, c(dots, args)) if (!is.null(collapse)) { collapse <- as_one_character(collapse) out <- paste0(out, collapse = collapse) } out } # used in 'glue' to handle zero-length inputs zero_length_transformer <- function(text, envir) { out <- glue::identity_transformer(text, envir) if (!length(out)) { out <- "" } out } # collapse strings evaluated with glue cglue <- function(..., envir = parent.frame()) { glue(..., envir = envir, collapse = "") } # check if a certain package is installed # @param package package name # @param version optional minimal version number to require require_package <- function(package, version = NULL) { if (!requireNamespace(package, quietly = TRUE)) { stop2("Please install the '", package, "' package.") } if (!is.null(version)) { version <- as.package_version(version) if (utils::packageVersion(package) < version) { stop2("Please install package '", package, "' version ", version, " or higher.") } } invisible(TRUE) } # rename specified patterns in a character vector # @param x a character vector to be renamed # @param pattern the regular expressions in x to be replaced # @param replacement the replacements # @param fixed same as for 'gsub' # @param check_dup: logical; check for duplications in x after renaming # @param ... passed to 'gsub' # @return renamed character vector of the same length as x rename <- function(x, pattern = NULL, replacement = NULL, fixed = TRUE, check_dup = FALSE, ...) { pattern <- as.character(pattern) replacement <- as.character(replacement) if (!length(pattern) && !length(replacement)) { # default renaming to avoid special characters in coeffcient names pattern <- c( " ", "(", ")", "[", "]", ",", "\"", "'", "?", "+", "-", "*", "/", "^", "=" ) replacement <- c(rep("", 9), "P", "M", "MU", "D", "E", "EQ") } if (length(replacement) == 1L) { replacement <- rep(replacement, length(pattern)) } stopifnot(length(pattern) == length(replacement)) # avoid zero-length pattern error has_chars <- nzchar(pattern) pattern <- pattern[has_chars] replacement <- replacement[has_chars] out <- x for (i in seq_along(pattern)) { out <- gsub(pattern[i], replacement[i], out, fixed = fixed, ...) } dup <- duplicated(out) if (check_dup && any(dup)) { dup <- x[out %in% out[dup]] stop2("Internal renaming led to duplicated names. \n", "Occured for: ", collapse_comma(dup)) } out } # collapse strings having the same name in different lists # @param ... named lists # @param ls a list of named lists # @param a named list containing the collapsed strings collapse_lists <- function(..., ls = list()) { ls <- c(list(...), ls) elements <- unique(unlist(lapply(ls, names))) args <- c(FUN = collapse, lapply(ls, "[", elements), SIMPLIFY = FALSE) out <- do.call(mapply, args) names(out) <- elements out } # create a named list using object names nlist <- function(...) { m <- match.call() dots <- list(...) 
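# fill in missing names from the matched call, so that (illustrative)
# nlist(a, b, c = 3) is equivalent to list(a = a, b = b, c = 3)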
no_names <- is.null(names(dots)) has_name <- if (no_names) FALSE else nzchar(names(dots)) if (all(has_name)) return(dots) nms <- as.character(m)[-1] if (no_names) { names(dots) <- nms } else { names(dots)[!has_name] <- nms[!has_name] } dots } # initialize a named list # @param names names of the elements # @param values optional values of the elements named_list <- function(names, values = NULL) { if (!is.null(values)) { if (length(values) <= 1L) { values <- replicate(length(names), values) } values <- as.list(values) stopifnot(length(values) == length(names)) } else { values <- vector("list", length(names)) } setNames(values, names) } # is an object named? is_named <- function(x) { names <- names(x) if (is.null(names)) { return(FALSE) } if (any(!nzchar(names) | is.na(names))) { return(FALSE) } TRUE } #' Execute a Function Call #' #' Execute a function call similar to \code{\link{do.call}}, but without #' deparsing function arguments. For large number of arguments (i.e., more #' than a few thousand) this function currently is somewhat inefficient #' and should be used with care in this case. #' #' @param what Either a function or a non-empty character string naming the #' function to be called. #' @param args A list of arguments to the function call. The names attribute of #' \code{args} gives the argument names. #' @param pkg Optional name of the package in which to search for the #' function if \code{what} is a character string. #' @param envir An environment within which to evaluate the call. #' #' @return The result of the (evaluated) function call. #' #' @keywords internal #' @export do_call <- function(what, args, pkg = NULL, envir = parent.frame()) { call <- "" if (length(args)) { if (!is.list(args)) { stop2("'args' must be a list.") } fun_args <- names(args) if (is.null(fun_args)) { fun_args <- rep("", length(args)) } else { nzc <- nzchar(fun_args) fun_args[nzc] <- paste0("`", fun_args[nzc], "` = ") } names(args) <- paste0(".x", seq_along(args)) call <- paste0(fun_args, names(args), collapse = ",") } else { args <- list() } if (is.function(what)) { args$.fun <- what what <- ".fun" } else { what <- paste0("`", as_one_character(what), "`") if (!is.null(pkg)) { what <- paste0(as_one_character(pkg), "::", what) } } call <- paste0(what, "(", call, ")") eval2(call, envir = args, enclos = envir) } # create an empty data frame empty_data_frame <- function() { as.data.frame(matrix(nrow = 0, ncol = 0)) } # replace elements in x with elements in value # @param x named list-like object # @param value another named list-like object # @param dont_replace names of elements that cannot be replaced 'replace_args<-' <- function(x, dont_replace = NULL, value) { value_name <- deparse_combine(substitute(value), max_char = 100L) value <- as.list(value) if (length(value) && is.null(names(value))) { stop2("Argument '", value_name, "' must be named.") } invalid <- names(value)[names(value) %in% dont_replace] if (length(invalid)) { invalid <- collapse_comma(invalid) stop2("Argument(s) ", invalid, " cannot be replaced.") } x[names(value)] <- value x } # deparse 'x' if it is no string deparse_no_string <- function(x) { if (!is.character(x)) { x <- deparse_combine(x) } x } # combine deparse lines into one string deparse_combine <- function(x, max_char = NULL) { out <- collapse(deparse(x)) if (isTRUE(max_char > 0)) { out <- substr(out, 1L, max_char) } out } # like 'eval' but parses characters before evaluation eval2 <- function(expr, envir = parent.frame(), ...) 
{ if (is.character(expr)) { expr <- str2expression(expr) } eval(expr, envir, ...) } # evaluate an expression without printing output or messages # @param expr expression to be evaluated # @param type type of output to be suppressed (see ?sink) # @param try wrap evaluation of expr in 'try' and # not suppress outputs if evaluation fails? # @param silent actually evaluate silently? eval_silent <- function(expr, type = "output", try = FALSE, silent = TRUE, ...) { try <- as_one_logical(try) silent <- as_one_logical(silent) type <- match.arg(type, c("output", "message")) expr <- substitute(expr) envir <- parent.frame() if (silent) { if (try && type == "message") { try_out <- try(utils::capture.output( out <- eval(expr, envir), type = type, ... )) if (is(try_out, "try-error")) { # try again without suppressing error messages out <- eval(expr, envir) } } else { utils::capture.output(out <- eval(expr, envir), type = type, ...) } } else { out <- eval(expr, envir) } out } # find the name that 'x' had in a specific environment substitute_name <- function(x, envir = parent.frame(), nchar = 50) { out <- substitute(x) out <- eval2(paste0("substitute(", out, ")"), envir = envir) if (missing(out)) { return(NULL) } substr(collapse(deparse(out)), 1, nchar) } # recursive sorting of dependencies # @param x named list of dependencies per element # @param sorted already sorted element names # @return a vector of sorted element names sort_dependencies <- function(x, sorted = NULL) { if (!length(x)) { return(NULL) } if (length(names(x)) != length(x)) { stop2("Argument 'x' must be named.") } take <- !ulapply(x, function(dep) any(!dep %in% sorted)) new <- setdiff(names(x)[take], sorted) out <- union(sorted, new) if (length(new)) { out <- union(out, sort_dependencies(x, sorted = out)) } else if (!all(names(x) %in% out)) { stop2("Cannot handle circular dependency structures.") } out } stop2 <- function(...) { stop(..., call. = FALSE) } warning2 <- function(...) { warning(..., call. = FALSE) } # get first occurrence of 'x' in '...' objects # @param x The name of the required element # @param ... named R objects that may contain 'x' get_arg <- function(x, ...) { dots <- list(...) i <- 1 out <- NULL while (i <= length(dots) && is.null(out)) { if (!is.null(dots[[i]][[x]])) { out <- dots[[i]][[x]] } else { i <- i + 1 } } out } SW <- function(expr) { base::suppressWarnings(expr) } # get pattern matches in text as vector # @param simplify return an atomic vector of matches? # @param first only return the first match in each string? # @return character vector containing matches get_matches <- function(pattern, text, simplify = TRUE, first = FALSE, ...) { x <- regmatches(text, gregexpr(pattern, text, ...)) if (first) { x <- lapply(x, function(t) if (length(t)) t[1] else t) } if (simplify) { if (first) { x <- lapply(x, function(t) if (length(t)) t else "") } x <- unlist(x) } x } # find matches in the parse tree of an expression # @param pattern pattern to be matched # @param expr expression to be searched in # @return character vector containing matches get_matches_expr <- function(pattern, expr, ...) 
{ if (is.character(expr)) { expr <- str2expression(expr) } out <- NULL for (i in seq_along(expr)) { sexpr <- try(expr[[i]], silent = TRUE) if (!is(sexpr, "try-error")) { sexpr_char <- deparse_combine(sexpr) out <- c(out, get_matches(pattern, sexpr_char, ...)) } if (is.call(sexpr) || is.expression(sexpr)) { out <- c(out, get_matches_expr(pattern, sexpr, ...)) } } unique(out) } # like 'grepl' but handles (parse trees of) expressions grepl_expr <- function(pattern, expr, ...) { as.logical(ulapply(expr, function(e) length(get_matches_expr(pattern, e, ...)) > 0L)) } # combine character vectors into a joint regular 'or' expression # @param x a character vector # @param escape escape all special characters in 'x'? regex_or <- function(x, escape = FALSE) { if (escape) { x <- escape_all(x) } paste0("(", paste0("(", x, ")", collapse = "|"), ")") } # escape dots in character strings escape_dot <- function(x) { gsub(".", "\\.", x, fixed = TRUE) } # escape all special characters in character strings escape_all <- function(x) { specials <- c(".", "*", "+", "?", "^", "$", "(", ")", "[", "]", "|") for (s in specials) { x <- gsub(s, paste0("\\", s), x, fixed = TRUE) } x } # add an underscore to non-empty character strings # @param x a character vector # @param pos position of the underscore usc <- function(x, pos = c("prefix", "suffix")) { pos <- match.arg(pos) x <- as.character(x) if (!length(x)) x <- "" if (pos == "prefix") { x <- ifelse(nzchar(x), paste0("_", x), "") } else { x <- ifelse(nzchar(x), paste0(x, "_"), "") } x } # round using the largest remainder method round_largest_remainder <- function(x) { x <- as.numeric(x) total <- round(sum(x)) out <- floor(x) diff <- x - out J <- order(diff, decreasing = TRUE) I <- seq_len(total - floor(sum(out))) out[J[I]] <- out[J[I]] + 1 out } # add leading and trailing white spaces # @param x object accepted by paste # @param nsp number of white spaces to add wsp <- function(x = "", nsp = 1) { sp <- collapse(rep(" ", nsp)) if (length(x)) { out <- ifelse(nzchar(x), paste0(sp, x, sp), sp) } else { out <- NULL } out } # add white space per line the the strings # @param x object accepted by paste # @param nsp number of white spaces to add wsp_per_line <- function(x, nsp) { sp <- collapse(rep(" ", nsp)) x <- paste0(sp, x) x <- gsub("\\n(?=.+)", paste0("\n", sp), x, perl = TRUE) x } # remove whitespaces in character strings rm_wsp <- function(x) { out <- gsub("[ \t\r\n]+", "", x, perl = TRUE) dim(out) <- dim(x) out } # trim whitespaces in character strings trim_wsp <- function(x) { out <- gsub("[ \t\r\n]+", " ", x, perl = TRUE) dim(out) <- dim(x) out } # limit the number of characters of a vector # @param x a character vector # @param chars maximum number of characters to show # @param lsuffix number of characters to keep at the end of the strings # @return possible truncated character vector limit_chars <- function(x, chars = NULL, lsuffix = 4) { stopifnot(is.character(x)) if (!is.null(chars)) { chars_x <- nchar(x) - lsuffix suffix <- substr(x, chars_x + 1, chars_x + lsuffix) x <- substr(x, 1, chars_x) x <- ifelse(chars_x <= chars, x, paste0(substr(x, 1, chars - 3), "...")) x <- paste0(x, suffix) } x } # ensure that deprecated arguments still work # @param arg input to the new argument # @param alias input to the deprecated argument # @param default the default value of alias # @param warn should a warning be printed if alias is specified? 
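# @examples (illustrative, hypothetical argument names)
# f <- function(newarg = 1, oldarg = NULL, ...) {
#   newarg <- use_alias(newarg, oldarg)
#   newarg
# }
# f(oldarg = 2)  # returns 2 and warns that 'oldarg' is deprecated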
use_alias <- function(arg, alias = NULL, default = NULL, warn = TRUE) { arg_name <- Reduce(paste, deparse(substitute(arg))) alias_name <- Reduce(paste, deparse(substitute(alias))) if (!is_equal(alias, default)) { arg <- alias if (grepl("^dots\\$", alias_name)) { alias_name <- gsub("^dots\\$", "", alias_name) } else if (grepl("^dots\\[\\[", alias_name)) { alias_name <- gsub("^dots\\[\\[\"|\"\\]\\]$", "", alias_name) } if (warn) { warning2("Argument '", alias_name, "' is deprecated. ", "Please use argument '", arg_name, "' instead.") } } arg } warn_deprecated <- function(new, old = as.character(sys.call(sys.parent()))[1]) { msg <- paste0("Function '", old, "' is deprecated.") if (!missing(new)) { msg <- paste0(msg, " Please use '", new, "' instead.") } warning2(msg) invisible(NULL) } # check if verbose mode is activated is_verbose <- function() { as_one_logical(getOption("brms.verbose", FALSE)) } viridis6 <- function() { c("#440154", "#414487", "#2A788E", "#22A884", "#7AD151", "#FDE725") } expect_match2 <- function(object, regexp, ..., all = TRUE) { testthat::expect_match(object, regexp, fixed = TRUE, ..., all = all) } # Copied from package 'vctrs' (more precisely: # , version # 0.3.8.9001; identical to the code from version 0.3.8), as offered on the help # page for vctrs::s3_register() (version 0.3.8): s3_register_cp <- function(generic, class, method = NULL) { stopifnot(is.character(generic), length(generic) == 1) stopifnot(is.character(class), length(class) == 1) pieces <- strsplit(generic, "::")[[1]] stopifnot(length(pieces) == 2) package <- pieces[[1]] generic <- pieces[[2]] caller <- parent.frame() get_method_env <- function() { top <- topenv(caller) if (isNamespace(top)) { asNamespace(environmentName(top)) } else { caller } } get_method <- function(method, env) { if (is.null(method)) { get(paste0(generic, ".", class), envir = get_method_env()) } else { method } } register <- function(...) { envir <- asNamespace(package) # Refresh the method each time, it might have been updated by # `devtools::load_all()` method_fn <- get_method(method) stopifnot(is.function(method_fn)) # Only register if generic can be accessed if (exists(generic, envir)) { registerS3method(generic, class, method_fn, envir = envir) } else if (identical(Sys.getenv("NOT_CRAN"), "true")) { warning(sprintf( "Can't find generic `%s` in package %s to register S3 method.", generic, package )) } } # Always register hook in case package is later unloaded & reloaded setHook(packageEvent(package, "onLoad"), register) # Avoid registration failures during loading (pkgload or regular) if (isNamespaceLoaded(package)) { register() } invisible() } # startup messages for brms .onAttach <- function(libname, pkgname) { version <- utils::packageVersion("brms") packageStartupMessage( "Loading 'brms' package (version ", version, "). Useful instructions\n", "can be found by typing help('brms'). A more detailed introduction\n", "to the package is available through vignette('brms_overview')." 
) invisible(NULL) } # code to execute when loading brms .onLoad <- function(libname, pkgname) { # ensure compatibility with older R versions backports::import(pkgname) # dynamically register the 'recover_data' and 'emm_basis' # methods needed by 'emmeans', if that package is installed if (requireNamespace("emmeans", quietly = TRUE) && utils::packageVersion("emmeans") >= "1.4.0") { emmeans::.emm_register("brmsfit", pkgname) } # dynamically register the 'get_refmodel.brmsfit' method for the # 'get_refmodel' generic from 'projpred', if that package is installed if (requireNamespace("projpred", quietly = TRUE)) { s3_register_cp("projpred::get_refmodel", "brmsfit") } invisible(NULL) } brms/R/data-helpers.R0000644000175000017500000005551014111751666014262 0ustar nileshnilesh# update data for use in brms functions # @param data the data passed by the user # @param bterms object of class brmsterms # @param na.action function defining how to treat NAs # @param drop.unused.levels should unused factor levels be removed? # @param attr_terms a list of attributes of the terms object of # the original model.frame; only used with newdata; # this ensures that (1) calls to 'poly' work correctly # and (2) that the number of variables matches the number # of variable names; fixes issue #73 # @param knots: a list of knot values for GAMMs # @return model.frame for use in brms functions validate_data <- function(data, bterms, data2 = list(), knots = NULL, na.action = na.omit2, drop.unused.levels = TRUE, attr_terms = NULL) { if (missing(data)) { stop2("Data must be specified using the 'data' argument.") } if (is.null(knots)) { knots <- get_knots(data) } data <- try(as.data.frame(data), silent = TRUE) if (is(data, "try-error")) { stop2("Argument 'data' must be coercible to a data.frame.") } if (!isTRUE(nrow(data) > 0L)) { stop2("Argument 'data' does not contain observations.") } data <- data_rsv_intercept(data, bterms = bterms) all_vars_formula <- bterms$allvars missing_vars <- setdiff(all_vars(all_vars_formula), names(data)) if (length(missing_vars)) { missing_vars2 <- setdiff(missing_vars, names(data2)) if (length(missing_vars2)) { stop2("The following variables can neither be found in ", "'data' nor in 'data2':\n", collapse_comma(missing_vars2)) } # all initially missing variables can be found in 'data2' # they are not necessarily of the length required for 'data' # so need to be excluded from the evaluation of 'model.frame' missing_vars_formula <- paste0(". ~ . 
", collapse(" - ", missing_vars)) all_vars_formula <- update(all_vars_formula, missing_vars_formula) } all_vars_terms <- terms(all_vars_formula) # ensure that 'data2' comes first in the search path # during the evaluation of model.frame terms_env <- environment(all_vars_terms) environment(all_vars_terms) <- as.environment(as.list(data2)) parent.env(environment(all_vars_terms)) <- terms_env attributes(all_vars_terms)[names(attr_terms)] <- attr_terms # 'terms' prevents correct validation in 'model.frame' attr(data, "terms") <- NULL data <- model.frame( all_vars_terms, data, na.action = na.pass, drop.unused.levels = drop.unused.levels ) data <- na.action(data, bterms = bterms) if (any(grepl("__|_$", colnames(data)))) { stop2("Variable names may not contain double underscores ", "or underscores at the end.") } if (!isTRUE(nrow(data) > 0L)) { stop2("All observations in the data were removed ", "presumably because of NA values.") } groups <- get_group_vars(bterms) data <- combine_groups(data, groups) data <- fix_factor_contrasts(data, ignore = groups) attr(data, "knots") <- knots data } # validate the 'data2' argument # @param data2 a named list of data objects # @param bterms object returned by 'brmsterms' # @param ... more named list to pass objects to data2 from other sources # only required for backwards compatibility with deprecated arguments # @return a validated named list of data objects validate_data2 <- function(data2, bterms, ...) { # TODO: specify spline-related matrices in 'data2' # this requires adding another parser layer with bterms and data as input if (is.null(data2)) { data2 <- list() } if (!is.list(data2)) { stop2("'data2' must be a list.") } if (length(data2) && !is_named(data2)) { stop2("All elements of 'data2' must be named.") } dots <- list(...) 
for (i in seq_along(dots)) { if (length(dots[[i]])) { stopifnot(is.list(dots[[i]]), is_named(dots[[i]])) data2[names(dots[[i]])] <- dots[[i]] } } # validate autocorrelation matrices acef <- tidy_acef(bterms) sar_M_names <- get_ac_vars(acef, "M", class = "sar") for (M in sar_M_names) { data2[[M]] <- validate_sar_matrix(get_from_data2(M, data2)) attr(data2[[M]], "obs_based_matrix") <- TRUE } car_M_names <- get_ac_vars(acef, "M", class = "car") for (M in car_M_names) { data2[[M]] <- validate_car_matrix(get_from_data2(M, data2)) # observation based CAR matrices are deprecated and # there is no need to label them as observation based } fcor_M_names <- get_ac_vars(acef, "M", class = "fcor") for (M in fcor_M_names) { data2[[M]] <- validate_fcor_matrix(get_from_data2(M, data2)) attr(data2[[M]], "obs_based_matrix") <- TRUE } # validate within-group covariance matrices cov_names <- ulapply(get_re(bterms)$gcall, "[[", "cov") cov_names <- cov_names[nzchar(cov_names)] for (cov in cov_names) { data2[[cov]] <- validate_recov_matrix(get_from_data2(cov, data2)) } data2 } # get an object from the 'data2' argument get_from_data2 <- function(x, data2) { if (!x %in% names(data2)) { stop2("Object '", x, "' was not found in 'data2'.") } get(x, data2) } # index observation based elements in 'data2' # @param data2 a named list of objects # @param i observation based indices # @return data2 with potentially indexed elements subset_data2 <- function(data2, i) { if (!length(data2)) { return(data2) } stopifnot(is.list(data2), is_named(data2)) for (var in names(data2)) { if (isTRUE(attr(data2[[var]], "obs_based_matrix"))) { # matrices with dimensions equal to the number of observations data2[[var]] <- data2[[var]][i, i, drop = FALSE] attr(data2[[var]], "obs_based_matrix") <- TRUE } } data2 } # add the reserved intercept variables to the data data_rsv_intercept <- function(data, bterms) { fe_forms <- get_effect(bterms, "fe") if (any(ulapply(fe_forms, no_int))) { if ("intercept" %in% ulapply(fe_forms, all_vars)) { warning2("Reserved variable name 'intercept' is deprecated. ", "Please use 'Intercept' instead.") } if (any(data[["intercept"]] != 1)) { stop2("Variable name 'intercept' is reserved in models ", "without a population-level intercept.") } if (any(data[["Intercept"]] != 1)) { stop2("Variable name 'Intercept' is reserved in models ", "without a population-level intercept.") } data$intercept <- data$Intercept <- rep(1, length(data[[1]])) } data } # combine grouping factors to form new variables # @param data data.frame to be updated # @param ... the grouping factors to be combined # @return 'data' including the new combined grouping factors combine_groups <- function(data, ...) { group <- c(...) 
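# e.g. (illustrative) a grouping term such as (1 | g1:g2) requires a column
# named "g1:g2", which is created below by pasting the values of g1 and g2
# together with "_" if it is not already present in the data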
for (i in seq_along(group)) { sgroup <- unlist(strsplit(group[[i]], ":")) if (length(sgroup) > 1L && !group[[i]] %in% names(data)) { new_var <- get(sgroup[1], data) for (j in 2:length(sgroup)) { new_var <- paste0(new_var, "_", get(sgroup[j], data)) } data[[group[[i]]]] <- new_var } } data } # hard code factor contrasts to be independent of the global "contrasts" option # @param data data.frame to be updated # @param olddata: optional data.frame from which contrasts are taken if present # @param ignore: names of variables for which not to fix contrasts # @return 'data' with amended contrasts attributes fix_factor_contrasts <- function(data, olddata = NULL, ignore = NULL) { stopifnot(is(data, "data.frame")) stopifnot(is.null(olddata) || is.list(olddata)) olddata <- as.data.frame(olddata) # fixes issue #105 for (i in seq_along(data)) { needs_contrast <- is.factor(data[[i]]) && !names(data)[i] %in% ignore if (needs_contrast && is.null(attr(data[[i]], "contrasts"))) { old_contrasts <- attr(olddata[[names(data)[i]]], "contrasts") if (!is.null(old_contrasts)) { # take contrasts from olddata contrasts(data[[i]]) <- old_contrasts } else if (length(unique(data[[i]])) > 1L) { # avoid error when supplying only a single level # hard code current global "contrasts" option contrasts(data[[i]]) <- contrasts(data[[i]]) } } } data } # order data for use in time-series models # @param data data.frame to be ordered # @param bterms brmsterms of mvbrmsterms object # @return 'data' potentially ordered differently order_data <- function(data, bterms) { # ordering does only matter for time-series models time <- get_ac_vars(bterms, "time", dim = "time") gr <- get_ac_vars(bterms, "gr", dim = "time") if (length(time) > 1L || length(gr) > 1L) { stop2("All time-series structures must have the same ", "'time' and 'gr' variables.") } if (length(time) || length(gr)) { if (length(gr)) { gv <- data[[gr]] } else { gv <- rep(1L, nrow(data)) } if (length(time)) { tv <- data[[time]] } else { tv <- seq_rows(data) } if (any(duplicated(data.frame(gv, tv)))) { stop2("Time points within groups must be unique.") } new_order <- do_call(order, list(gv, tv)) data <- data[new_order, , drop = FALSE] # old_order will allow to retrieve the initial order of the data attr(data, "old_order") <- order(new_order) } data } # subset data according to addition argument 'subset' subset_data <- function(data, bterms) { if (has_subset(bterms)) { # only evaluate a subset of the data subset <- as.logical(get_ad_values(bterms, "subset", "subset", data)) if (length(subset) != nrow(data)) { stop2("Length of 'subset' does not match the rows of 'data'.") } if (anyNA(subset)) { stop2("Subset variables may not contain NAs.") } # cross-formula indexing is no longer trivial for subsetted models check_cross_formula_indexing(bterms) data <- data[subset, , drop = FALSE] } if (!NROW(data)) { stop2( "All rows of 'data' were removed via 'subset'. ", "Please make sure that variables do not contain NAs ", "for observations in which they are supposed to be used. ", "Please also make sure that each subset variable is ", "TRUE for at least one observation." ) } data } # like stats:::na.omit.data.frame but allows to certain NA values na.omit2 <- function(object, bterms, ...) 
# subset data according to addition argument 'subset'
subset_data <- function(data, bterms) {
  if (has_subset(bterms)) {
    # only evaluate a subset of the data
    subset <- as.logical(get_ad_values(bterms, "subset", "subset", data))
    if (length(subset) != nrow(data)) {
      stop2("Length of 'subset' does not match the rows of 'data'.")
    }
    if (anyNA(subset)) {
      stop2("Subset variables may not contain NAs.")
    }
    # cross-formula indexing is no longer trivial for subsetted models
    check_cross_formula_indexing(bterms)
    data <- data[subset, , drop = FALSE]
  }
  if (!NROW(data)) {
    stop2(
      "All rows of 'data' were removed via 'subset'. ",
      "Please make sure that variables do not contain NAs ",
      "for observations in which they are supposed to be used. ",
      "Please also make sure that each subset variable is ",
      "TRUE for at least one observation."
    )
  }
  data
}

# like stats:::na.omit.data.frame but allows to keep certain NA values
na.omit2 <- function(object, bterms, ...) {
  stopifnot(is.data.frame(object))
  nobs <- nrow(object)
  if (is.mvbrmsterms(bterms)) {
    responses <- names(bterms$terms)
    subsets <- lapply(bterms$terms, get_ad_values, "subset", "subset", object)
    vars_sub <- lapply(bterms$terms, function(x) all_vars(x$allvars))
  }
  vars_keep_na <- vars_keep_na(bterms)
  omit <- logical(nobs)
  for (v in names(object)) {
    x <- object[[v]]
    vars_v <- all_vars(v)
    keep_all_na <- all(vars_v %in% vars_keep_na)
    if (!is.atomic(x) || keep_all_na) {
      next
    }
    if (!is.mvbrmsterms(bterms)) {
      # remove all NAs in this variable
      keep_na <- rep(FALSE, nobs)
    } else {
      # allow to retain NAs in subsetted variables
      keep_na <- rep(TRUE, nobs)
      for (r in responses) {
        if (any(vars_v %in% vars_sub[[r]])) {
          if (!is.null(subsets[[r]])) {
            # keep NAs ignored because of 'subset'
            keep_na <- keep_na & !subsets[[r]]
          } else {
            # remove all NAs in this variable
            keep_na <- keep_na & FALSE
          }
        }
      }
    }
    is_na <- is.na(x)
    d <- dim(is_na)
    if (is.null(d) || length(d) != 2L) {
      omit <- omit | (is_na & !keep_na)
    } else {
      for (ii in seq_len(d[2L])) {
        omit <- omit | (is_na[, ii] & !keep_na)
      }
    }
  }
  if (any(omit > 0L)) {
    out <- object[!omit, , drop = FALSE]
    temp <- setNames(seq(omit)[omit], attr(object, "row.names")[omit])
    attr(temp, "class") <- "omit"
    attr(out, "na.action") <- temp
    warning2("Rows containing NAs were excluded from the model.")
  } else {
    out <- object
  }
  out
}

# get a single value per group
# @param x vector of values to extract one value per group
# @param gr vector of grouping values
# @return a vector of the same length as unique(group)
get_one_value_per_group <- function(x, gr) {
  stopifnot(length(x) == length(gr))
  not_dupl_gr <- !duplicated(gr)
  gr_unique <- gr[not_dupl_gr]
  to_order <- order(gr_unique)
  gr_unique <- gr_unique[to_order]
  out <- x[not_dupl_gr][to_order]
  names(out) <- gr_unique
  out
}
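# A minimal sketch (not run) of 'get_one_value_per_group': the first value of
# 'x' observed for each level of 'gr' is returned, named by and sorted along
# the group levels.
if (FALSE) {
  x <- c(10, 11, 20, 21)
  gr <- c("b", "b", "a", "a")
  get_one_value_per_group(x, gr)
  #  a  b
  # 20 10
}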
# extract knots values for use in spline terms
get_knots <- function(data) {
  attr(data, "knots", TRUE)
}

# extract name of the data as originally passed by the user
get_data_name <- function(data) {
  out <- attr(data, "data_name", TRUE)
  if (is.null(out)) {
    out <- "NULL"
  }
  out
}

#' Validate New Data
#'
#' Validate new data passed to post-processing methods of \pkg{brms}. Unless you
#' are a package developer, you will rarely need to call \code{validate_newdata}
#' directly.
#'
#' @inheritParams prepare_predictions
#' @param newdata A \code{data.frame} containing new data to be validated.
#' @param object A \code{brmsfit} object.
#' @param check_response Logical; Indicates if response variables should
#'   be checked as well. Defaults to \code{TRUE}.
#' @param group_vars Optional names of grouping variables to be validated.
#'   Defaults to all grouping variables in the model.
#' @param req_vars Optional names of variables required in \code{newdata}.
#'   If \code{NULL} (the default), all variables in the original data
#'   are required (unless ignored for some other reason).
#' @param ... Currently ignored.
#'
#' @return A validated \code{'data.frame'} based on \code{newdata}.
#'
#' @export
validate_newdata <- function(
  newdata, object, re_formula = NULL, allow_new_levels = FALSE,
  newdata2 = NULL, resp = NULL, check_response = TRUE,
  incl_autocor = TRUE, group_vars = NULL, req_vars = NULL, ...
) {
  newdata <- try(as.data.frame(newdata), silent = TRUE)
  if (is(newdata, "try-error")) {
    stop2("Argument 'newdata' must be coercible to a data.frame.")
  }
  object <- restructure(object)
  object <- exclude_terms(object, incl_autocor = incl_autocor)
  resp <- validate_resp(resp, object)
  new_formula <- update_re_terms(formula(object), re_formula)
  bterms <- brmsterms(new_formula, resp_rhs_all = FALSE)
  # fill values of not required variables
  all_vars <- all.vars(bterms$allvars)
  if (is.null(req_vars)) {
    req_vars <- all_vars
  } else {
    req_vars <- as.character(req_vars)
    req_vars <- intersect(req_vars, all_vars)
  }
  if (is.mvbrmsterms(bterms) && !is.null(resp)) {
    # variables not used in the included model parts
    # do not need to be specified in newdata
    resp <- validate_resp(resp, bterms$responses)
    form_req_vars <- lapply(bterms$terms[resp], "[[", "allvars")
    form_req_vars <- allvars_formula(form_req_vars)
    req_vars <- intersect(req_vars, all.vars(form_req_vars))
  }
  not_req_vars <- setdiff(all_vars, req_vars)
  not_req_vars <- setdiff(not_req_vars, names(newdata))
  newdata <- fill_newdata(newdata, not_req_vars, object$data)
  # check response and addition variables
  only_resp <- all.vars(bterms$respform)
  only_resp <- setdiff(only_resp, all.vars(rhs(bterms$allvars)))
  # always require 'dec' variables to be specified
  dec_vars <- get_ad_vars(bterms, "dec")
  missing_resp <- setdiff(c(only_resp, dec_vars), names(newdata))
  if (length(missing_resp)) {
    if (check_response) {
      stop2("Response variables must be specified in 'newdata'.\n",
            "Missing variables: ", collapse_comma(missing_resp))
    } else {
      newdata <- fill_newdata(newdata, missing_resp)
    }
  }
  # censoring and weighting vars are unused in post-processing methods
  cens_vars <- get_ad_vars(bterms, "cens")
  for (v in setdiff(cens_vars, names(newdata))) {
    newdata[[v]] <- 0
  }
  weights_vars <- get_ad_vars(bterms, "weights")
  for (v in setdiff(weights_vars, names(newdata))) {
    newdata[[v]] <- 1
  }
  mf <- model.frame(object)
  for (i in seq_along(mf)) {
    if (is_like_factor(mf[[i]])) {
      mf[[i]] <- as.factor(mf[[i]])
    }
  }
  # fixes issue #279
  newdata <- data_rsv_intercept(newdata, bterms)
  new_group_vars <- get_group_vars(bterms)
  if (allow_new_levels && length(new_group_vars)) {
    # grouping factors do not need to be specified
    # by the user if new levels are allowed
    mis_group_vars <- new_group_vars[!grepl(":", new_group_vars)]
    mis_group_vars <- setdiff(mis_group_vars, names(newdata))
    newdata <- fill_newdata(newdata, mis_group_vars)
  }
  newdata <- combine_groups(newdata, new_group_vars)
  # validate factor levels in newdata
  if (is.null(group_vars)) {
    group_vars <- get_group_vars(object)
  }
  do_check <- union(get_pred_vars(bterms), get_int_vars(bterms))
  dont_check <- union(group_vars, cens_vars)
  dont_check <- setdiff(dont_check, do_check)
  dont_check <- names(mf) %in% dont_check
  is_factor <- ulapply(mf, is.factor)
  factors <- mf[is_factor & !dont_check]
  if (length(factors)) {
    factor_names <- names(factors)
    for (i in seq_along(factors)) {
      new_factor <- newdata[[factor_names[i]]]
      if (!is.null(new_factor)) {
        if (!is.factor(new_factor)) {
          new_factor <- factor(new_factor)
        }
        old_levels <- levels(factors[[i]])
        if (length(old_levels) <= 1L) {
          # contrasts are not defined for factors with 1 or fewer levels
          next
        }
        new_levels <- levels(new_factor)
        old_contrasts <- contrasts(factors[[i]])
        old_ordered <- is.ordered(factors[[i]])
        to_zero <- is.na(new_factor) | new_factor %in% "zero__"
        # don't add the 'zero__' level to response variables
        is_resp <- factor_names[i] %in% all.vars(bterms$respform)
        if (!is_resp && any(to_zero)) {
          levels(new_factor) <- c(new_levels, "zero__")
          new_factor[to_zero] <- "zero__"
          old_levels <- c(old_levels, "zero__")
          old_contrasts <- rbind(old_contrasts, zero__ = 0)
        }
        if (any(!new_levels %in% old_levels)) {
          stop2(
            "New factor levels are not allowed.",
            "\nLevels allowed: ", collapse_comma(old_levels),
            "\nLevels found: ", collapse_comma(new_levels)
          )
        }
        newdata[[factor_names[i]]] <-
          factor(new_factor, old_levels, ordered = old_ordered)
        # don't use contrasts(.) here to avoid dimension checks
        attr(newdata[[factor_names[i]]], "contrasts") <- old_contrasts
      }
    }
  }
  # check if originally numeric variables are still numeric
  num_names <- names(mf)[!is_factor]
  num_names <- setdiff(num_names, group_vars)
  for (nm in intersect(num_names, names(newdata))) {
    if (!anyNA(newdata[[nm]]) && !is.numeric(newdata[[nm]])) {
      stop2("Variable '", nm, "' was originally ",
            "numeric but is not in 'newdata'.")
    }
  }
  # validate monotonic variables
  mo_vars <- get_sp_vars(bterms, "mo")
  if (length(mo_vars)) {
    # factors have already been checked
    num_mo_vars <- names(mf)[!is_factor & names(mf) %in% mo_vars]
    for (v in num_mo_vars) {
      new_values <- get(v, newdata)
      min_value <- min(mf[[v]])
      invalid <- new_values < min_value | new_values > max(mf[[v]])
      invalid <- invalid | !is_wholenumber(new_values)
      if (sum(invalid)) {
        stop2("Invalid values in variable '", v, "': ",
              collapse_comma(new_values[invalid]))
      }
      attr(newdata[[v]], "min") <- min_value
    }
  }
  # update_data expects all original variables to be present
  used_vars <- c(names(newdata), all.vars(bterms$allvars))
  used_vars <- union(used_vars, rsv_vars(bterms))
  all_vars <- all.vars(str2formula(names(mf)))
  unused_vars <- setdiff(all_vars, used_vars)
  newdata <- fill_newdata(newdata, unused_vars)
  # validate grouping factors
  new_ranef <- tidy_ranef(bterms, data = mf)
  new_meef <- tidy_meef(bterms, data = mf)
  old_levels <- get_levels(new_ranef, new_meef)
  if (!allow_new_levels) {
    new_levels <- get_levels(
      tidy_ranef(bterms, data = newdata),
      tidy_meef(bterms, data = newdata)
    )
    for (g in names(old_levels)) {
      unknown_levels <- setdiff(new_levels[[g]], old_levels[[g]])
      if (length(unknown_levels)) {
        unknown_levels <- collapse_comma(unknown_levels)
        stop2(
          "Levels ", unknown_levels, " of grouping factor '", g, "' ",
          "cannot be found in the fitted model. ",
          "Consider setting argument 'allow_new_levels' to TRUE."
        )
      }
    }
  }
  # ensure correct handling of functions like 'poly' or 'scale'
  old_terms <- attr(object$data, "terms")
  attr_terms <- c("variables", "predvars")
  attr_terms <- attributes(old_terms)[attr_terms]
  newdata <- validate_data(
    newdata, bterms = bterms, na.action = na.pass,
    drop.unused.levels = FALSE, attr_terms = attr_terms,
    data2 = current_data2(object, newdata2),
    knots = get_knots(object$data)
  )
  newdata
}

# fill newdata with values for not required variables
# @param newdata data.frame to be filled
# @param vars character vector of not required variables
# @param olddata optional data.frame to take values from
# @param n row number of olddata to extract values from
fill_newdata <- function(newdata, vars, olddata = NULL, n = 1L) {
  stopifnot(is.data.frame(newdata), is.character(vars))
  vars <- setdiff(vars, names(newdata))
  if (is.null(olddata)) {
    if (length(vars)) {
      newdata[, vars] <- NA
    }
    return(newdata)
  }
  stopifnot(is.data.frame(olddata), length(n) == 1L)
  for (v in vars) {
    # using NA for variables is not safe in all cases
    # for example when processing splines using mgcv
    # hence it is safer to use existing data values
    cval <- olddata[n, v] %||% NA
    if (length(dim(cval)) == 2L) {
      # matrix columns don't have automatic broadcasting apparently
      cval <- matrix(cval, nrow(newdata), ncol(cval), byrow = TRUE)
    }
    newdata[[v]] <- cval
  }
  newdata
}

# validate new data2
validate_newdata2 <- function(newdata2, object, ...) {
  stopifnot(is.brmsfit(object))
  bterms <- brmsterms(object$formula)
  validate_data2(newdata2, bterms = bterms, ...)
}

# extract the current data
current_data <- function(object, newdata = NULL, ...) {
  stopifnot(is.brmsfit(object))
  if (is.null(newdata)) {
    data <- object$data
  } else {
    data <- validate_newdata(newdata, object = object, ...)
  }
  data
}

# extract the current data2
current_data2 <- function(object, newdata2 = NULL, ...) {
  stopifnot(is.brmsfit(object))
  if (is.null(newdata2)) {
    data2 <- object$data2
  } else {
    data2 <- validate_newdata2(newdata2, object = object, ...)
  }
  data2
}
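# A minimal sketch (not run) of calling 'validate_newdata' directly, assuming
# 'fit' is a fitted brmsfit object for the 'epilepsy' data; the column values
# in 'nd' are illustrative only. The post-processing methods run the same
# validation internally.
if (FALSE) {
  nd <- data.frame(zAge = 0, zBase = 0, Trt = "0", patient = "1")
  head(validate_newdata(nd, fit, check_response = FALSE))
}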
brms/R/posterior.R

#' Index \code{brmsfit} objects
#'
#' @aliases variables nvariables niterations nchains ndraws
#'
#' Index variables, iterations, chains, and draws.
#'
#' @param x A \code{brmsfit} object or another \R object for which
#'   the methods are defined.
#' @param ... Arguments passed to individual methods (if applicable).
#'
#' @name draws-index-brms
NULL

#' @rdname draws-index-brms
#' @importFrom posterior variables
#' @method variables brmsfit
#' @export
#' @export variables
variables.brmsfit <- function(x, ...) {
  # TODO: simplify once rstan and cmdstanr support these methods
  out <- dimnames(x$fit)
  if (is.list(out)) {
    out <- out$parameters
  }
  out
}

#' @method variables data.frame
variables.data.frame <- function(x, ...) {
  names(x)
}

#' @rdname draws-index-brms
#' @importFrom posterior nvariables
#' @method nvariables brmsfit
#' @export
#' @export nvariables
nvariables.brmsfit <- function(x, ...) {
  length(variables(x, ...))
}

#' @rdname draws-index-brms
#' @importFrom posterior niterations
#' @method niterations brmsfit
#' @export
#' @export niterations
niterations.brmsfit <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  niterations <- x$fit@sim$n_save[1] %||% 0
  niterations - nwarmup(x)
}

#' @rdname draws-index-brms
#' @importFrom posterior nchains
#' @method nchains brmsfit
#' @export
#' @export nchains
nchains.brmsfit <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  x$fit@sim$chains %||% 0
}

#' @rdname draws-index-brms
#' @importFrom posterior ndraws
#' @method ndraws brmsfit
#' @export
#' @export ndraws
ndraws.brmsfit <- function(x) {
  niterations(x) * nchains(x)
}

nwarmup <- function(x) {
  if (!is.stanfit(x$fit)) return(0)
  x$fit@sim$warmup2[1] %||% 0
}

nthin <- function(x) {
  if (!is.stanfit(x$fit)) return(1)
  x$fit@sim$thin %||% 1
}
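# A minimal sketch (not run) relating the indexing methods documented above,
# assuming 'fit' is a fitted brmsfit object: the number of post-warmup draws
# equals the number of post-warmup iterations times the number of chains.
if (FALSE) {
  head(variables(fit))
  nchains(fit)
  niterations(fit)
  ndraws(fit) == niterations(fit) * nchains(fit)  # TRUE
}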
#' Transform \code{brmsfit} to \code{draws} objects
#'
#' Transform a \code{brmsfit} object to a format supported by the
#' \pkg{posterior} package.
#'
#' @aliases as_draws as_draws_matrix as_draws_array as_draws_df
#' @aliases as_draws_rvars as_draws_list
#'
#' @param x A \code{brmsfit} object or another \R object for which
#'   the methods are defined.
#' @param variable A character vector providing the variables to extract.
#'   By default, all variables are extracted.
#' @param regex Logical; Should variable be treated as a (vector of)
#'   regular expressions? Any variable in \code{x} matching at least one of the
#'   regular expressions will be selected. Defaults to \code{FALSE}.
#' @param inc_warmup Should warmup draws be included? Defaults to \code{FALSE}.
#' @param ... Arguments passed to individual methods (if applicable).
#'
#' @details To subset iterations, chains, or draws, use the
#'   \code{\link[posterior:subset_draws]{subset_draws}} method after
#'   transforming the \code{brmsfit} to a \code{draws} object.
#'
#' @seealso \code{\link[posterior:draws]{draws}}
#'   \code{\link[posterior:subset_draws]{subset_draws}}
#'
#' @examples
#' \dontrun{
#' fit <- brm(count ~ zAge + zBase * Trt + (1|patient),
#'            data = epilepsy, family = poisson())
#'
#' # extract posterior draws in an array format
#' (draws_fit <- as_draws_array(fit))
#' posterior::summarize_draws(draws_fit)
#'
#' # extract only certain variables
#' as_draws_array(fit, variable = "r_patient")
#' as_draws_array(fit, variable = "^b_", regex = TRUE)
#'
#' # extract posterior draws in a random variables format
#' as_draws_rvars(fit)
#' }
#'
#' @name draws-brms
NULL

#' @rdname draws-brms
#' @importFrom posterior as_draws
#' @method as_draws brmsfit
#' @export
#' @export as_draws
as_draws.brmsfit <- function(x, variable = NULL, regex = FALSE,
                             inc_warmup = FALSE, ...) {
  # draws_list is the fastest format to convert to at the moment
  as_draws_list(
    x, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  )
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_matrix
#' @method as_draws_matrix brmsfit
#' @export
#' @export as_draws_matrix
as_draws_matrix.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                    inc_warmup = FALSE, ...) {
  as_draws_matrix(as_draws_list(
    x, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_array
#' @method as_draws_array brmsfit
#' @export
#' @export as_draws_array
as_draws_array.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                   inc_warmup = FALSE, ...) {
  as_draws_array(as_draws_list(
    x, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_df
#' @method as_draws_df brmsfit
#' @export
#' @export as_draws_df
as_draws_df.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                inc_warmup = FALSE, ...) {
  as_draws_df(as_draws_list(
    x, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  ))
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_list
#' @method as_draws_list brmsfit
#' @export
#' @export as_draws_list
as_draws_list.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                  inc_warmup = FALSE, ...) {
  .as_draws_list(
    x$fit, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  )
}

#' @rdname draws-brms
#' @importFrom posterior as_draws_rvars
#' @method as_draws_rvars brmsfit
#' @export
#' @export as_draws_rvars
as_draws_rvars.brmsfit <- function(x, variable = NULL, regex = FALSE,
                                   inc_warmup = FALSE, ...) {
  as_draws_rvars(as_draws_list(
    x, variable = variable, regex = regex, inc_warmup = inc_warmup, ...
  ))
}

# in stanfit objects draws are stored in a draws_list-like format
# so converting from there will be most efficient
# may be removed once rstan supports posterior natively
.as_draws_list <- function(x, variable = NULL, regex = FALSE,
                           inc_warmup = FALSE, ...) {
  stopifnot(is.stanfit(x))
  inc_warmup <- as_one_logical(inc_warmup)
  if (!length(x@sim$samples)) {
    stop2("The model does not contain posterior draws.")
  }
  out <- as_draws_list(x@sim$samples)
  # first subset variables then remove warmup as removing warmup
  # will take a lot of time when extracting many variables
  out <- subset_draws(out, variable = variable, regex = regex)
  if (!inc_warmup) {
    nwarmup <- x@sim$warmup2[1] %||% 0
    warmup_ids <- seq_len(nwarmup)
    iteration_ids <- posterior::iteration_ids(out)
    if (length(warmup_ids)) {
      iteration_ids <- iteration_ids[-warmup_ids]
    }
    out <- subset_draws(out, iteration = iteration_ids)
  }
  out
}

#' Extract Posterior Draws
#'
#' Extract posterior draws in conventional formats
#' as data.frames, matrices, or arrays.
#'
#' @inheritParams as_draws.brmsfit
#' @param pars Deprecated alias of \code{variable}. For reasons of backwards
#'   compatibility, \code{pars} is interpreted as a vector of regular
#'   expressions by default unless \code{fixed = TRUE} is specified.
#' @param draw The draw indices to be selected. Subsetting draw indices will lead
#'   to an automatic merging of chains.
#' @param subset Deprecated alias of \code{draw}.
#' @param row.names,optional Unused and only added for consistency with
#'   the \code{\link[base:as.data.frame]{as.data.frame}} generic.
#' @param ... Further arguments to be passed to the corresponding
#'   \code{\link[brms:draws-brms]{as_draws_*}} methods as well as to
#'   \code{\link[posterior:subset_draws]{subset_draws}}.
#'
#' @return A data.frame, matrix, or array containing the posterior draws.
#'
#' @seealso \code{\link[brms:draws-brms]{as_draws}},
#'   \code{\link[posterior:subset_draws]{subset_draws}}
#'
#' @export
as.data.frame.brmsfit <- function(x, row.names = NULL, optional = TRUE,
                                  pars = NA, variable = NULL,
                                  draw = NULL, subset = NULL, ...) {
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_df(x, variable = variable, ...)
  out <- suppressMessages(subset_draws(out, draw = draw, ...))
  unclass_draws(out)
}
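# A minimal sketch (not run) of the conventional extraction formats, assuming
# 'fit' is a fitted brmsfit object; 'variable' selects parameters and 'draw'
# subsets (and thereby merges) posterior draws.
if (FALSE) {
  df <- as.data.frame(fit, variable = "b_Intercept")
  mat <- as.matrix(fit, variable = "^b_", regex = TRUE)
  arr <- as.array(fit, draw = 1:100)
}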
#' @rdname as.data.frame.brmsfit
#' @export
as.matrix.brmsfit <- function(x, pars = NA, variable = NULL,
                              draw = NULL, subset = NULL, ...) {
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_matrix(x, variable = variable, ...)
  out <- suppressMessages(subset_draws(out, draw = draw, ...))
  unclass_draws(out)
}

#' @rdname as.data.frame.brmsfit
#' @export
as.array.brmsfit <- function(x, pars = NA, variable = NULL,
                             draw = NULL, subset = NULL, ...) {
  variable <- use_variable_alias(variable, x, pars = pars, ...)
  draw <- use_alias(draw, subset)
  out <- as_draws_array(x, variable = variable, ...)
  out <- suppressMessages(subset_draws(out, draw = draw, ...))
  unclass_draws(out)
}

# use the deprecated 'pars' alias to 'variable'
use_variable_alias <- function(variable, object, pars = NA, ...) {
  if (!anyNA(pars)) {
    warning2("Argument 'pars' is deprecated. Please use 'variable' instead.")
    variable <- extract_pars(pars, variables(object), ...)
  }
  variable
}

# remove the posterior draws format classes from objects
unclass_draws <- function(x, ...) {
  UseMethod("unclass_draws")
}

#' @export
unclass_draws.default <- function(x, ...) {
  unclass(x)
}

#' @export
unclass_draws.draws_df <- function(x, ...) {
  x <- as.data.frame(x)
  x$.chain <- x$.iteration <- x$.draw <- NULL
  x
}

brms/R/sysdata.rda [binary R data file; contents omitted]
t !웽J1*k}VoUg8 WK-Gn)0IyqYܡZtH$oS̃+4caxG ѻyRxdi |7Ȋ acïRfUֶKӞ2/&N-%MMG˚ 1+Le.*7?67}0;L`SF*c3ogS^9U.S"u F>}[ 滃E\ J@g =}0V7DsAC/Rz&a7Y$!h[Ӈ Ro*t띛/;gaJ @J?[1etg]W)t&[wVѼrEfJ1?)v("GO;?*($M'a(K/AFbIF1Բi{h^ ^W[ԁ<+HT*ʮw3X=r?CTv-|~X/U $eݲw7ϚCt Ǘhye)r͒P|ͮ$zi|ityv 2|V?2Z gkex,j"V~ FHr AOi3w*ԗP2B C_pNiŪЋ'j󹐩23j:h^>6U[z-Q?é][u?bhW7$dK+/WZ ?lM;M6_ 7kHB[.BAo/{S{J䬥_UhDmTԖ%27ܩ(x_K!FUR0i> AʊRgIض!vzW ,5' k'd"4Q:.+WಽvߏNw|2$  7ϋchy_ ѐIKv<^"˼_q5n~s.vOoE02a4;k2y}cy!Ιخn֛g [Yc{C!ÐTz['KQt08$9Ïuj#2% Ajm_`'#S更uW 3߽Ȝއ6Y~-=2roTrmG"iI +B=еzh5;%Xl W}yNZ'Kp}py[8t_?NB~@Ը@2m0W:JwAl+xVRcS@rHe0F]{,A#_dy (nn*\;U@Bȗ0 ӐegtU I+ݲ·^ȔS׀Wh;D@p0+DlzyKQ\Ʊoqe& fsW2,c=ck?tJ¸9vs[42G>u A1DxJ;`T' )4f<d a;yߣq֫7hy{/;[I|g- m_uQRy b_ aAhݟ?|\NOU0^nyc{}HKH-PAdҽ=zqL٠!2J,=U{<ޠZD.cExcRTf1!KnX OZǷ+du~W *ZeI#;x^Nqd[5$Ē!]TB:8GGHTy@8+;r.AP97%[ L*`<SYj,v%?|6 ؂TpVEA&0U-X|Lu:î~tCgGDmKerzE9'nɺPA!)J}p@ν>2ךeP UXcb;9Kz⯡9QJ?id. @AȰ;kY8Pt U#\fB ;(Yݯ/K:ȋ_S@0b5YZ#~F;Q+_EfqL9 `I+[?,f}Z EAjBXdtWqZ1;|-eN5Mo [(A/%fB;|T\Em:OxxSߚVy9y >*NV5y/i}$D 1&z6݌ S3q[Vqh REE`RG#Y4n`ry~}r'O=;4G3}< " 49%_8H>ApCap{NoL0+?vt:բ Pm㣗3Jir#ڇsR&oHih;O"s&v0v޷a0f u'_b:쯰! 遙t/c–8Wj滉%$Q"sZ 0>9ŜtdP&Q" Pcю3]M)oBo[>'@\ Zv|RWd  llhnfP#SzqoV,@i֐`c8P&NG72 l`A'QU}7oL̾oq`DIo#S..:0 %A Ҡ <81 ;vjq̑v͓ͼE$b]]F~܇mB2t dt=L`My<̆z߇5wS/XtlQ5@\!C 's$ ӎӼcOz/_,24%K pgn*&@mgV{ E[s_gb(]%/[-?_U߉v @Bjcv`U*[jѲ~՗%JH݌XeVRlQ,DQ-u|H .B/RE[jKp=1+\$ю}+=-BOb⟺scc}(*(d@zd@.%9WqO4UMhmV2bMY\l (yʂ#'g#peVE4aZd>QE$a%>g#IN0dP'$Bխ\};|@U/?Zj:"w4?HT8Cdw)AȫH*=Gܘ)K19TN :J{f8bmV'(xQUNK#*|ӪK_ G)DAj[tAyd0_=ys=Zh^@=Ԁ/Xvl'DʈtGp \AuYl0˹ ;@)nJQwMUQ3!0gr?X]P T1 IYkZ]IKZgqTiרwV]{&3k'CBVN, +4<C`W  ?Ok]̈ri|/{3=K]oUIi;7[lfs?=IF0N?he&N(aح8#rEnz5NƥMTOCJEF>Fa%G媓~'=?h^5r$a P }1V/mǡp'JAh+fuǛqǡ\* XGʂt* &(\=,=l1Zbpcn.kG:ɸ#Z)J(sZ{EEPӚKڎzibq15hx? G)MP..䢤Vd9oirSqɗ,տZFFߪ*@*6KM^k74y/ ,Tw&gBB Bj%ln(% Oѐr_=R*\tBך'BmS^8`?f暞1h_0xH[j2mEL1i'eU$Tp*&giiNo\;ݛ.?k};öjm`P^/Y{(3g?9%MH9i``?{?;>|U.σ0sxߢ"$B">v=9BPX #Xf>X8U1xlXIIך3Omm/ G/8SS-&GUcAJV"jNƒvBHHpj)۹RA{0Y.v_ ybk+7OEba&ڀw1;>CL|PAUf"?"Ǯ?;!z$әT,ڍ?՝5!~Ͼ4A镥P*$Lo|r 2`A>blq8 L@<{t7[ )UX f3pn0gL1`--K #J<266u`IrsjIm?V|DDI؜gL^L~#4T?(A Ǒt|ۤ(u; OއZuA?FBm]Sc Uٿk>"Hq6 4Z6Bp@bA-|erf+zy/J+5,Y8*<~Gke{ith-gOh3*VxFwXyY1`zV8j.T7A*hDߵ ocy{wx^@xkUJZ0&_X~AU*$vaH@z[ k5E{P.\0C&:wekȤlBMݟ]4Cq~i!s16LJSX(E/O2nCF)w_oJNj'L1ObT+.4k^r-@KZjF4xJks/T c 8!9b\F BG$*yݦP)!loɱ/a7L 00V;צ dnyܐm=O30h>_ ʫ&Ő9bTA|CbnDm-D;!^Q{ͼ2RbUpZKu$0);x~qZ{H@4_rD9܌Sws7жЗ+Sˌy;|ƽZ$/P h j @$0p)х(?G&i1+ƞ]aW1E) P RsƯ|oA?GxN~oԳ|o0P:cӺ (D*Zz7I{QeUR/s*GG7M1!eu<32؋ftB6?ԥbѢ:bTA^)Fe83XZ#0갗roM^'svoU4;Vh:$2V䡷dʨpn+dE %tb_2UtUL!%Cd ؠ>ڠ`F@|_Kɤ}9q;zzOd17]snz]ɞj]w9]KA\z}쬬 4BL\C_|?r|Jyȴ %=&E^ѴsV`BTe8]<#螷8PP?&V+uoٳnSM , b6 4JE3P*w^f2^L-AD_ Z]aÇ-<ꁔeܟMz.[W6%gSm9W/Sgly!Ц$2" d6H`&*Q@h  B, _Aj|[ww?i#}@滑Vv.dxUkb1 f] c7aT =_}{MYҀ *?XzyF蠢|*-3^x3t@05œ8_ϾCn^TaN EUT˧/Rg>4v{(c`9i @kM!;߮hs {dp _zӬ K!hFQږ.^ &(dʡH>S97Ť53ӊ~Z˅-=SDXC(4^z(b7M'?ar^bY=໤ E1J2LX7saZYI>xRo8~ދb}>*;RV42 9eٗʻE7||P25F"[ڋ׭}[Lh\#1]HChyY:jW>w:Fd yaHC'ƒ̹yEAILլoz3]Wbـ]&^;+M|K{DВbJv [a˺96!a>csbG1ޔ iR[,B^cE7fvtX\H>};5(()@0<u.  @ y(yCuPp @?0ddjTDwzV5*Qc#o_Z{>G P0$4D-x7'j~vY7G1{dk7|Sp tyV*-@a?Yqz N)>3Āez/ 'o-rJ!{c&i6/d\&` R7:+Bb9ҝhvqY^Ү\Οmϲ\yYz]6`bɈ7 2/vO>,>q{qn,gPB2UNt1AGRnǭo VQ4ҥ8HPckJJs=b`R&/ԁ*}laP*π-C3GO`iA֚ˈQFyL^dԻ~ J>M@*Y+HwR-pļ_փ}w|~b2=@lJ8uՙ8Mvߖj#$ 0kpb{d[QZ齶r{o=v{{y_ Exʪ'cTk#38NZ3O y/WZw|j4GxL9{Fi q 0D kp] _73SweV+VAgN_!Sp5: %]IvjƓ9|?-sTMsB.</vd~ XV2XHnMKεbq1ޤ Dʙz-ÀlMB]\bK?O\4)w+oSDUB$T 'M/'z? K'8C}>rO]ںt@ZȄqU։ـnx_>('_Rq{Dv΅sɗP2%/GEx?v]R)AzC ;F|x~]ݓQ+햬UJUƕZ,z4m8㤵wu92 u!X dj7HA :#!=*?Ac(bJZ%QIifDW? 
[=u.~4T/?q Jm8)Al0Z>O k<,nt۳ nt+ȹ~ {w ={ȔmQHxȞQ"z'齺êYc݌GEG[^%vg+{?,yA Ar`, xL.ڮwAm5zoûvtͣ\~lb6B`&"vJilx{Cz[9N qRt`gP ?fLcY(G֮^^)0~Tn鈷4'/S\k1ެ/g/Up]&a@;7Dii!68Ey3i9?VYD* SP oy,aRY={qzw14Q<롡, ^ŏp~ГM{;̦J,z:^`2Y8*]t;LʂX~W3&'Q6F\WSXJ~2Nj#a;<"9Чe#8xO-cX0:bHT)@']+dS/4"$ԐFL!{'"߫{sc9[Ue\U!%2 ӜiϮ&P4X *.^@ƬR5?b 0 +\}jL K0\D Q~lX.r/j,?̞;_b6ɾHfOS(P]W>pY+浿cj]d;ѿn{>+!/e 4yd%Tn2|O=˯KŻoguH~x@r\d ‘ʡ.z 2i٪o@Y(z`~UmwnC}ey_.Qd.ٍ+}@LC#zZBOvvS)]o4OxNqe.g'mRR~R󴃿KPwvhE쒂@|[9*\RvH=i {uIi`8jYͬze*HY' 88Ò_ Fϙ6nH*+3ӓo4sxseU$IF0XAJf`2I)'A"KpR $,H#]|iZCFO8C^1}hW/sR;QE vOUdv~|qF3H @P ) ?yUB.TnsrK6WnĜ2  9 !v3aړ3]ONcXTyw*m3:PլD*:V0Zg((%ACGlZpfrbZ !$H\mrZO~SWq$jQ"* XHI?"0wqU7G))vi8 T ZUE/neL|帜JPXzq~(uFlf9?+ovjIT T'*w*Dnfw0Åb')+f\PhP8Q.@Og 07Bˏ S%9qNtP8@FEa:oT7[ `'N_n_#|yS|{R]/(ÈB8( zoLca%a>vl UOs;9Wb|u :o*MS;.>nA})xKA'+9B*%>< sֈt,*-or`7z^0jw9l܅ `h<)YN*,?D^ rn5 %E3<{[Ɉ#,ѣԽ~cn/wۿEk7HsdK E烄x1Q3^5P"ώ̚O=:Tb8E$oG^fOZO|W`$Ǜ=]˼Q׷?&CbR,I}IOE,,)=߫ mN )kL[N4}C>~ORQ}`},1~bݲlN7R8ghw_Y7nWP{t?\ 'Yz,[%gugnN>J-YJ\O{/B+㇐p1( L dށ +?;P}FtT/ bi^ꠟl~7<  TDł\bȑbU4S6 Ԇ|c h4acgCa풻ATXR>X@W()) QKۅgj}Z44AtotAUΖ\qK.VBTr,jnnc#&7@0,a',4a*9S{HmHȒ*H$!>4C[S~ !^P@@& *TaDhT{U& Vxy(H{S.`\jQ_1*Op3 ǗdƔ ;fOVafFv+p4 }+iTl-{<[$<iihW<$p%Rn*K-I@-%G>jY 20T B` Dz(*$4ppphqpn\A^7o{Ҧ';Mxx{٤4D0%KEbr+>l)+*@I@{S@C"N8k2">@aIhiz^} g`%IA3 *<Cݖkl,TA e@. (P-%eGU Q ,`l ZH-DEx)a5,əvBQECAKEL;L⤒k8< YY5* (rp"YLV^θ Xv @i|^^1=3vіXFXYEn5oz݆fɝy`Y6f4 j[{hFl6ACMA_I ==[?:*gh/̠,p--97wi>EЮS{Hd I_.Ox^Mw$rzجDԿ+{6ig@a/}t`k4~_FoghrDo>Wok|<)ǖW~Gho\2B>dkCjWNϋ{O5Oۊ-CF4DHR$.( +xO.UEρѝ}SIw.!nb\Y[{ʿ3oW,qύ%j_Jx'6!o`8{3u>Gӄ;x+dX8,H(y4Bv(x$%%0L04: gQjk^!+¯U7]e[RZ7+I [oӼejy4h54v#aNFK΄¸q&|GuqbpA:3x ''ESYv:Ĩ~i9YOF.iD%ԒqyA1!B\KL @ύ 5TTMFAdхMPddpHT*ĥ1h 1TJ>^"^6I琐3&D<ڀUK BÕ*0(ڂPA,8HSh[MdLMkkI$e {WӉt 9[ҦP@mN-N}mr 4L^O?8 =`*KҐX@29ӡPZ#Orj* .a@v4`o)JP# qJD 4,wDZ'e10)G\ B(Gj&E HF;M8(xa9}&.\k=pCTi$P%L q0j^]PgGXLYC{살t4&4(0Fƈ^L  @EXL;Ȋ׻\F,(ݟSL7vpa h!?!8HFtz^~TQeu?7643wq0ֆQ95L|Iƣ^Ve Ba2\rxZF=IҠ_ MM[zŃ-#Z ׋9<{ DG3Smc~=[xo߾ϧmcWn g<_Co9?ۓ c t Sbڪ P5UHQCP$ ȁE$d@a=RC^OiqKK_h5; 1y((kI~!OOA|GXgϟ@H׆XNGꝴ:saNԑgEQ0 15k @_ض$kk_v6ѷ(VF&kUWf3fp>FM:LTN}R#|{< g\2faUһ*ON'1VC~ ](BQ][^GIGZ@~_zh-b.5$?$᳸5 áWTyQyB!0! ( bzyUܖ-s ;=DoeFq6<NVAImU0/mKW+j'A}d3vƋߦx謊 ϐO4r@t0)6? *?A~15l+v4ncO2ݞbD;}V0 ~_|/fvO$(Z )kzˇ:8VQfuᏒE37U lϳ DO5_y`KR1}8l*ehQ'uOe|0\33E^fyjeyJ p3slJȄ<|ߓ*K" msETJ&Q ǎD W8C@lσ8YDrBg66ի)KNeˈ@;5dCmvu|,.=ۘ!,>'[C/K:$Oc#@\=1 ]*In˗ƄPx isV H73PC0 \Dx.('Oڂ(Eچ#;I\ Z `kP3Ժ9d g"8z V~\UL`>iW]n!bҴP-h ?8&lx<9W"rY9JB*l@ kL AHՍkDPjG&&pxAm}'|?w"Q x+h֕#BSRrV>jBN]/DΣ#y_qҫҌ#A$J}Eʼ+k{lιY>"7>_3`bڨt3龛N+bމ&J:o`?9[ XSH+$v}wR+UQZ1pIӪ$gR#%23@B>x*H@TK<΢ߚ{fnyq>B#Zˮ,l R{efG@̌0鮬qtI2*J+ ֥$I !*I#,3!ڕTF $ A:ƗȯFtq9VH1I4g‰BUzFG\|6u5o[ۨ-Z_nqH)TrRSw<TanB|0#\"2H\ !qCiwJ)(1p5^]Dw ~N"L򢀠5LR1ջk}$KC]'Pn[_w`*st:]I#YiB t8&WIOLaS6(Ac2 NUBU)VE"(4*y[z}J'ԑNt^|c:ؕja˹$dUκiyZlgHBiYYpQ` pe@BBesZ I<pcgA$*Qm!xF yJiI}h!0TLdm aJquUp&4|Lii?Rj$l3Y&aaw!d$Z'߭pwn'xNLkC1YUh #PTã#aat1{8x:`F/@/dٳ}JȄBTidG-QY­qk 7J6BK> <2eGwE`DZ@5^kq ~S RQٙ.*Bٙ:ttS@, 㟿 0AGPZ:rA@` %)h7 À(w?^.)R@B@(y2fj L HV)(2"J.5V @ځ&0YQ>>Cz ~+l kwp jZ)tZxQ S^ϐК +VE 7Paa  _q{SG)b(p#$ .94AdLDHg|OLmJ+~!У9S:;}("zپY| {txܶVňH U~Qk)0*x/X"lӮBOH>E|cLA ҡ r T2PߓP5J/(i؁!Py==,zw͇>㋘;Af0ֈхzĊ#AYYh9%%H<PPPDÝy-`\lD|YK1@6 *Ȳv0ӉmsCIL.( H xkA_Tf*&1M TY0UQ8DX c[yr\U9 h%PB! 
)) M.>~'J@0B'B fK$,yRm@!H( wg@)C66 لnwCC@ B Z8R5)JN022x{񺋜@u{]M&A=zi1JSbЀc]ش]Ŭ5H{bN>:_c|{vwY\*vQ NᲙPapkyGw~XsboMr- 3ڣC?S?ĐAAza BP!: يIٶ=ngO .uv1屡!{Pla "!LPH1{\@@φz밈}JuQP a?އgpqyϛie䘜`IV}Wo3]B`I 6x Mhp mu @ [1Oq [(ňcjr?w:,doƹ.h@2|p\^PmўLVUOT 2M@΃HPی?9Yof~JqŚƳA16+o4UWv`yv:}aacɑBろ  @RfD0 Tq6Jv@$%kG{H@S;_w># uHo~ym!߾{JtnqV ӞgYa>}o0ݗ~[,DBWJ<4}?w/wY_w*b@KB`bVnw]b"j.Q aĽODG=SS), s^(-dnuo/g{w zE^ k?Vbf_W=wfSиL4H"U> }~zxQUTP^L(؁P T:ZhP3s9huj u*, 5B kh/yg*0%1J Ϻj*F/[KK`/rx*:cZx ~nd{4ዓF<}x@ ,+[tc7 $k0EF]w>zK5wzƏ/vkcu`^~G z{1t }h29-c!YjFbX+xL'hiSoz 76a^zC6d-R@`x#@t"՛=<9c/Bd&(H@Ԉz.7L8%Æ:+~x\2A|LD9M hJ1d&yOkFsC$Rw"]q ݒT1@C+iC\gӸĞ0Dđ|Al)=XTTJ&ۄaq^2h9Wß:혰wifԬ <^ѫPvX}meL쨺1T\3c^K^œy-'~4W[\E75+4m ½Kxh45W}PVPz:avLg5HAاbGWSdϷ,68v#)'lH+%+[d* #!&QCPHҫc`F" { x ҝ ITHbKZ!U~p9nBz~y|z=g{D+E7P!agF?[;*R @=f VKmҵ+?~Z"0!&de~;ث ݛb5.&4ޭ`B  z.zRpxfҢ9@{a ^HA҈*SBE1ƕ&{.ҿsNBRPef9j6H!? `H'A4 "sٳ|iMSW>$0u0nQ02~ |0Mε㾾;T ܱ2rʂQ6DB.9J,?Wsw,L[3C0 SGku?Xz%Au.bTWjڨHFv(@Ir#$G ?*Čı̱1̋0 &]ZBV}W-[J笔^J8מI h)y1q_i aeATLM$4|E<ttz[0_ mSm{(s2 ? `|sU!IA T }(^S"Tw;!KS!Bkp5q@,A>qM/7]pm$okDRN0?wmpީ?mI$9ȸ{7I(QJ !|0v @-{#~9^) 1 㢦X!5s#dQ 6[] %!.${H$ eJ>h%>IV0# 1q\eA2H) A`M9P?Ѽ3>ֵFסHs.[ 栟/r1D=Xr7*^"Z!w|_y'yqz7yW. O+37.b >9@Τ;^cf*&a5w(?<_B{usdIedK7oa Oę 9x?U}МB8αox_^.@ohUxљ4p!~ m(f]UtosAbOz3в(K½)$zNf6l+w޼}Ib3VXj5rtsV{nS :U6! h|ٿwm﹚ja?7Mi+! ꡩ*EIr)a tCۍwfP}$*vy*>t=.',gzkxr@@(TH_BBá戏#O[@*`P% ?9_ͯ4qX(_W@q⹫ST$ 3=|pΏzAiOҪ!B o2!i$dX YCwhHm - LJ sL !깨HNcPD6kQp_S`X"o;[}@V|x. >w vEuzf`h!}yOC 5= P?)J 71r{'nw?[U}2mNL β& Z%O`( ͬcso kPCD.A1Gh `{;c?[GW `pw'c8T(J!H`螁?fȟ2qCg΅쾿:w '+r/&jsv9[1tC̵Kט>9'rEU|\MW񉸩&Ӕ:0TV6h" )KxV#۽uö \c5V{}J!,4h#0Bl ppҰߢH.}]tteꞩ x]]y`}qEP#;|/ĐCH w$zC6T#C'Ƣ~\\Q ݒ,"3"nv7W=N}o/b/̲2<XC0+H_(:hp. DFnDoĽsbM Q x4O JG©"qr0l -]Ob7CG_QM-$QCFŦ3%y+kl~[%1+h-X) @PBlR @`Z%pY3 FjMaqⷛ8 @Q, aQA:'Yh"#Ap4m¸fg2y:w9N?RVB*Zн:< S͊!&K1iu x\I%y p.m>PL'p\UG@}0*ᢄ +X8moVJ=c,"nF7 :e&޾+AK.tXP,CP&3 Ȉ9ZHx̯[Tap+r]|]WΕe&4}v2;Xg%tjns19SG+kZ;g Uto@;%Dv_oJ.)GBKWg p@SE]~IoZa3x.nPB@$F5Ni,PQ~R s~ra2c`l)^HCRa,P#K2Z]'L."M>Hz(o&=Q*HIh: )CAmNg&iII! mu Q_T= * $aZ֐( 0 JiH7:0 $ E6\96NMAO惹x}# B>-p˜3_N{COvV/PἨNP8!3xRëfÆ"76I:($x]60F*">5$P۱"۟b!??*of0=VV}y)~m4[Mj8>>,k=s'><̞.(ng uȏPQ,+]¤zj]*j7[|԰EfTbUW,!dMgI#iR-vOᠲZϤiHcRWY^ Q S+ H<sdd EY]wqxELb)R~|p(Я e(TX{g${EJ\Y典=3Kr4E9+eRes*śTqUĢXKDth"HG8g "9кk5xuk:+ T8aEu?G9_*xR) *f@248ph臀pWx(DMƜX⢖{~~PfE†bi8ޛagאKo8Fܩ:'_/Cdk#datقɘpVI`lF#4p%1IJ D H3' ʆbS9wU$ scڄJM;Љu tT^|n# Y:pqubC_*&+g3vgByM$9 \(~s/>owGMd1M pÅ?af @:?6;SL#(ek{fMƽX4WgF4D2ǢKݓ $+Nz 1O.#ԡhW0a~]0)0_]ִp;l>_Y ~$Sb&8osS[H D)~zJ$)B#ׄh(A S)Q0q] ^|+hTDjۚxtC`n פd , PT ]][6qPi"ŀ^zSM4 !%B O%/0?=kDžm?RXxD:RQ2ET֬H! 
.ns&u4 _^]B 8!,S Q.m4PP8| )ÃuBii[DV9i otP 2L:.d,{2u:R0]ll9(5V҉"P{]3RJLJJ:$(wb!ܗ^krjKBHv)0 VTeQk #=zѳsP̧%hHnPi Z*L%R&q/AYnk"&p2Ĩ́cYy !eA"nc::"0w) MuB$je쫸w87i5Zbc ?"1~lۚȬ^ݐ흵4'6?ߝcOEp@ z|cّ 6C5olO G5Uz[^~}SIԷ?pdE79Y49^Fs ~&}~_بp'>UC\-Ri(Uz<` <pE]/m0e{&#R2ILj:h)$Qk&k`fc[eY_ލ pXt-F޷pZnӯ4TWaۃ^cxvyM#hӝqS]9uמHw <9Q3{ 2 g;g,s"d)V7oIHy=+=MkzV  KPG`0 ?tCSާ{q!F%&'pl٤8tr}):*$^u'rrCc=g^B< `.HÐrh˵/Ep-~n\K/D kr9/ 2B&B@~=FT(+qS}Ny5u)*8AD SkDPgvܣux>ףrd[{5A3l+s [wFg'ԇ 9q"d BP]~YYYkv<?CҪS$jvy-@zuc4;CWؔ 3#!6MEyt OFE"K753Ksw,G$ Geyw`anK>*RQ :Jß7ā>ۢOK\S.P@U5SdA sñ=: >&S&&'Pt"C5NIeVp`A(=}p"( H *,3 Zlʼn\3̄M Z5D :.^D9NC\ D@nɪgZXP` ib'꺩ڗDLT!TI/)Ќ\:HM )d@dِeR8|KQWistӎB a-hioT}30pPPޖ!mEaɆ8z=S ppÖ -X𴬦:I@,l׸8Da@ȝa,ALL;3lvsGtp:NNn4d] H2'@ZM8: doI"oݼrHd=hhR!*P(&5~ &Ԑ xߋ2O(NTr~ =עr2s(wڤɒ6kA*=,8iq,6Jq$r*D|˹/#woSwJLz 5KM-|7a@*fno|{_};==m{қv31La@?듎0(*g=΄{ᓱR~5ߋu3z?ǁ&Q|m>{t #]> nSH{,:#aO9o6o3itn>s_"p7 55aͭ 7CIk@VA1#1h'dbK G&N@CU<#SƧEl/ ߈]14ZU- 2%!t"fk,o_mRP"ia5  g?T%Ƃ_q\A;7f:.WI!x~BBa6EwFaqT8 BؤE{D/iEݠ"^)'y@*"в$h{x}l=_$cܟ4aX~)勲xܡ_6$ ӣog5QQޖY^]:KU˺I蒫$sG|g6)[[LFvmSq{ۻU;]QɡCAaf&@'朊aG?[A.22zκ-( O]Q+X&JQQCr xԽǞɣ`3$9m(\Et=Sr`L鱀TdR #Acd"0H={a=q\"_=*' rʪC;`WK%+v'/ 6 | ٟݸ=5~RH%l=)ancy_A΋^P_{#~-lS@hR\{ )qU{z#7<ߴ G[HaԮ'7-`Vrڭp.i}Oujw>RC?~mN5'qmI801 RG͢M8F<WK**R++í5Gx<.H`*Իq Sh8VS6MƼЩͳXQB9'8TG'qй֬[vє]UʖU_YK['[nH.*QX 737v6 k@'~Gs'wZ>>&#bۯ\,3^[ѣk"3b[a1c+Qc{ 4aoxq m#&/B !#EH\Om mPCggԉnUbiص^-Ko<)R}<ui\kwj﹕ zD WO` L-O A*Y~Ҁy;VBalG'amiNp})VQ=EQw7^꡸{8ri(JC6 >k fl/pR胥  ٮ{>x]:(P܉Ċ@",}v~+z{SGӏ7Qe_Z,X|( [a~O)O^Q{v^.e"ORȃl*ty[KMߦE ?en]W\~w x( y#;N#XmzH@ǰ҂q4%#3sO)H-hRkfD?d;O.)IGlMwp!=w6ܴ.&YR' 3cn\s)] ! p8Ly!Am@8m@vǀQ! \ehe@ x$9iϲ4/# MCB}aŸ ?lݨy "4 /9L4wӨtlGw-"s0?9؛W+wx}l-xߛo.ɀe;&Oi}xCߟ_7k[^#{_YnǴM'B!_ )7{=.f2v*'m|VWGUd7^=‘i`P B#;5/ay*HcQ0י79H30` UZL|Æ㙈gS>ͼD;TyF&Q..>Yq+P%\0w6{\GMm>/3vy *aU{VN<0%!BVlgKN郸S(VbbɄ؂y @H@e]Nam#2ċ[ajx‘r4ivMn/mj\A.K/L]nBpЀ5 {9UNрI D\5]e4v!x:hh0,),t:y`& t$>5sڝF 5'gy",j{B23:=k0gT7 o>ʃ-(Lj &C[KaAIßzcam-KЁB kC|?}O+>;@ RW,17@.hP @ww;SoXB0DRP@ *ߘprJ iU؎W Uz (@ ri7z4pz(0N|JD<=G|ó,x_Ow-Zu6c0]emto:#¿?V^Yh攉M1hy(Ԛu|}8I_fLqIxRzU@Fj[2{m+s.4\vk֏YBȕ`'qB;蔅@* y Tؕ7@ܹ`˰@@S'7?왯2{haA*a+>[E2'L6#ff{'%N%*O!u11LM؞붜/a[I?B[pgMs7z% L&F}N">SS>PV|F PJW'm M~&Hy`>y7/]#J^&~[]1 e؟OiOG>`APW:׭GJF" Tb"4ɴ<#*Txt"xC>7b9ÐrB\I|/?~KqcQ[읨3sV:%WTKkDRGi s˜1Jvr;Wld7U<9i]ۮ<˱ܒ(<+[k ҟ4 ˵1i>Fe:.S^9̶)d9o0i}1EⶾY*`k+P4`".k>Cz@"UTfo ~,)$ K^C NQCi{7?n:?_= Ťs_o;!#x GPxQHbOupyiLzt$ {[F|TG\)O&i<ހu$ 뫤&ShDIc׀^)NSUw7aG;QRL%_ve*IMy,A-ZwL˾7k_!Լ )kK<4c@2%5Fy M6DٌR:l* ƄGS9Y6|O; K'>lv7$"ANg) 4_K ˑY|1+jH;K-alIL潀v!Oy0K' ^J?m6듊WO&? Bgg霖”zEK-32jj^HVH ^YEMVbւ&I==.F;Qc# 5*tZIWmCwts+nQNΌ}z)@N2IʱnAQ"=u >eW)āԇ求P0vrBG]`9msY}noovMH02\B )II=Q~NYh1vhrjaH_ F+;a`{7=쩵%Ah%;_&WѢ,ۥ$lW0G۫ sӬRLto׻^I剛JH*o} تvZgZ3SQ}^ՕXY|zzyJ<x 8H<$ȕ45@TϪ!@Sa/qyW==\_C)ê5}"Ttmӂ= 譸RL^94OJռܥ W+^DŰRfq8C\isH _vQU~N],mV~I!ك}R'^umC''zil,<]ΐe\BX7ęi >(<4fh&?ZN"r?ُd|ʡ_\{c r޶z6ѥEwߕZ('oT.:Ky^4s~^9V#<=v|螂T#Wy/Ց4m&#$Tg@J5ˢv{>󠑏g@{PoX N QSĽ{~۽x̕^ƃJC\d0@5P]*?:i$]%C!G֍RaC*F~ i/4"LM9ᐹe.<3n9%޽)!|MG'3a/{ p<;\{UUN6. srt|pe7jkxA6Bb1@-r [J\Y*HN )XsQymx{;ը =Pls=0XrOÂ{0/`=l"a8^~εhf}#B-ItF{xyϞRZа*N֧^ނE`<'B_dMyV[| ~OvGaH:DOu_/|O鸽'{s)x +@j DU|wgTUlLZs,WLR]뾱"eѕpi6V0V޲}.|ܓ5O͙G-# WyY^av9żL3KlZoZ^/#7tO>Bt-uO3fnɒEK$0?L['Mo~T@7M M/g8OH!{ϐZR!-@il_^zūZ r[d|pŠEZtpv Aq!.Skۅ~CDDP3MzzRRb<_Gˠ0*v=Íh4=u}bxF;`5ƉO +!3/*bhݩY>4_1UQbJyت"zVYi-;u\[¹5βg4Y w&kB͹G?i-ܰz#H0y)2-b(w>`iYo{wɨKT4T_P^W) _8 #}kU{&ZXE5|;哿aNt~j94bDC10+>:b<~yj$?͊j떗ϝ%x#ϐ*@wSnv#5ezmbR 2?dSlNp|Gul5>؀B] F+]'LyΓdxy9.NX}Tԅ fЬtl9-eB\Tyli*I`"M؂y@^ZNˏXZ*o1^j}>NՃECp']M9xg!) 
@bp ± Ov RoAغfaz_t>ːZ:yUvuTv^+%qBFXvH"ucp+l.xizVv@e)9 spW =:#6UO} 2Q 6=97BH-|LcW_ iS`'KUKaa\1o[$q>^ppH[@"">S ʇ_b;-b(`y V}wNjaq B m7Ϛ6WpBv[VMj O]Q'F{oƣɱ.'GGW\n߷b.8&xpxn?Qsk(_N`LBhACpP,FvNx?;\~f}fO-{ z_WKC1o]fb6dǧ5 {/< QPWu3GY&mἈizK N(ivXG~;]jW5ޥSŭ` 9,T&M~0gp0h/L8| #ps %FJ ^P@ $Rpy",Af8P{3hg`JZ-?+ 4\_uX_R6ч[jf!E*fzD m%uS׷׬ks[b+8 lwpfCCMq+mI ʹ dUp[Pp S^AэA7lnDVhujvJc4D 7`؀Bbgjӊq}ʐN[Ou>Ab@-4{]܆JФfXLI.gDȖ^wǠe('v]J耠껾V`(7`J:`P.{rg;VVk#) )X2̎/o?kbuTnUmսITAYZ3=FR6_qjUѝQ6sp:4>9#1j Qu-jE^cґ9g}b?V8,O/ڔxAXF[w<[4"{{oQ8 Gɟ=o3Y~ "^%2)1+sUi` lz|?31~Jw*hD .bz/OV9XXϝcU*niU6|~Ӈ> lw,w!i|4cLH fiiz$dDZmǕ%O,t|4: ^!b΁kv;Dэ@SkcW`nmYe[x|'_ZMCq'fL؛pw_dR]aVg\T-&ec Y[c[;j{0vh {K A[5z:=^COHt&j {%VemV׫̴$Ƴr~(;ԨϽyhKHȑq+_TOW*1moGcPљaylU(3æצແ KGseS`|lgYEpazWb:x}Ҥ X}J $}1eQY_Zex ]c ߺ8 &ă@l^6c"tlscШ!.'uȯFp=F|wrA%ԤlX e. 4Rr7Lt{K /mĂThmuv]ڟ{7D19j:nԱ?¤}'IG_MQ}yDsXȰc9g\tW>E%śa-_"r{[ЛA;qjʗ61²{_gKZܷ-xr-wmԷ[5${$ѐj=pfOs >_p -6h%zӫ,Qn+b}W zdլ>&~mB,̗ur)"loaĬZa *bb5B*ΣjlZos4 }I`7LWGR;=DT[YXq>}nKCc[XƅX^fu?kJk]~~T 6=\oq\ \"xܮ>}}%x8yt:<\&j/h ;6=Djcy}cC3dT!.69ƍfЪEnXӘY"G5NDɤ.a ?z0j?7'u| swNOv|")6H/=5#3Ct>9Uc^&kϙXEw`ƮK$ %Y+D'D+a>G}$>yrZ\F!jRRuuFg{6o BR1͚,t.NLL1Q8_yP֯eȴ"ˌ댃y,>j]휥+W!V';(/@/,%W#ϼ6K!,A¹aI|iw=̦-Yk|px?YD0Po8fM6iQY -R1b=F߱W !v ~^F4=-O]{P軾?P-OIy3)b rp2ewMkN~1G.G1uj>gnw/)!uWO#?~۫\TwH.Qzb-RݸAV MI RQ3(e캑qUcs">/K g/Vm()oWhtʊR!0AO1.C>.ߊ6uhEwc~t4ES׆1w c`gNLWeT{7]| {5ԥ2.EWq|Pe%)x[}zX,"1F!ee1O 49\Qwt斞yF( )BˌG[}tä++dJrȿT[mZ3W4#=ԉ07I$f$*\SOLΤ|7쭧Eu'ɫ(}4mv Nzv$ -n jI[ׯi Uٸw]8[G+B}ei=B(-1 \IݶH;~N/!V 7tsII'G׾PѠ/ǪǑva~W`\|ôI¸||9K;o?ZjLGKq6଻ydpCB:#r'UFm0YNLXM>(_L"AAܑ{Mj9a> Or_s瞔/?w?^[Zw{kriyQ{-+nKwKKoDS15GoN~G@ucPNQqq銥e1 g;@'05Chv .|5%?it,v ۱Y-w|]+t !!wqYV< ) RTʱ}5jy:Sm43N+_ pp]q(&L"PLu}Dzsw`a$O #;qSֻRyFV /CY$[ͪ\]gRcLY'mH)|Ob @ uV]Mz\Utb#-s.a6S9||H"jpR\F,ېshrW(YX19 bc藚y,]nfhzzdvG[̫3jh5^]y({]`SM! y%Xu~>>c`&m?L\Geœ@\4 w80x?.|I+*MFߨv7Cx^Bp|;'I(`K=?\/1ݨjy#Eœ@\H'c6/OwOӓϫy;3 Ui GqdQ.lxl_cΆx)ۭ4. zpL2ΣC? PаM2\E쫳bP>ޟh-)& j 4Q\o|][drv"εTî;)>_QX$}k+縉Kz*3„@>4y_Wo4+#Ҕ5NF|ڕP<֛D?LDPd;>lmkRҲ'Io娾qp  32ꇚ@j+E})PuyeW:D :@ qg;R / .] .X7m;GB͈ཤ+2nwεh @$4\%#uK1Va?!fr ܈tuLjmi}*y!tKHe Z~X0ǛCǜ xpU5fY%a2s. !Fn.*U89(I>T2SĻǽ2B  [;T*d^ᬯi orBZu4X2J|Gaep;%aS Ge;y',fNZ>j3* }I0R6CZbnD;c{KKT7:5{W@nk$k!0gW \K%H>\`Iutو9 ȴlF9 pj޻SމlcpDM1juryqJ3}FTja/Ł",Vmy a}@_='6tBWYzW?~_wԃbM?5O6 Z$ !K*շ sVYJMP0mH +{9]a@ YP2znx6W[r;\=;@SOAqJ0٭Imׂk8".+mkculÊi/E? ^^]VxirX8?;о*=VLhJ_/X->g7 c_ݿ~|RS fč,iTt^ķN1czګp< aSVşvx:V!4G>novf*p6H:r)8J> Cjg,2xGH:E#3omu)(QHd=F–օT7*@C5?\U&Dǚ<$只WWG7\,1z*C N%N=ubLm>酉 }̭`O=\Fq`Bp&|[`V* X^/3iBu#'XmLF&uUqO'UYpeaЭ,҈r]tXqIĴZz*%ZKt KD$M7x٣gi%ȢdjcT1j&Ft)N˄+Kxo#K5~f|?/ZJPޣr3Q(l xrUnBR<2]h -"8P>= G oZ)f IU퀇}û~.'X|x-HvGC:8l OvslsN&ZaIԟ(JukDAGga J>~~ N,R.cvmƛ/d8ۛ/l9szCvRUAHN *d?z: H꩸TT\?2{ SO )m KĩoisCEμ?;FY(LK/K)XMbAӇ$a32)(n~q˶z74tY:j έ22bMfm})՝~D-@oLnvW]v\iEPMV\1H1N!HDa= 2gHr e?cu1\3zkR7 ˴M[`54ie;f0 "4j(C'.އHmwGP5Mt2Z VV#ܪk ~nQR6B 5k?eٯ,\߷ p;=F7NAPf\gDZR0V{@TIji|L='vQipmUh8gmosUw00:@|( D)>~4:qפ͞+0A$s 0hx/QHdl9`PPR iHQжwMu;Ppa7(XQ,u~-5# <1x()u<MD>ey[Sئm[! YB9ơųoWBY}^DP[zgOnwTI)SFW_l$[ܤٱ:ɯ~KùΟ(Rm;Y&/Ors˜0*%4E峊gc©N['6eWe5Yh:%, Uy`]C݇»,i0GML||]RrZKts^~wg[٘|_(!. 
@ @AE xvOWcf;3$ubAmŨJ>؅HQnjMMo=%g /Q 7 ӆq# NR\-1k(cZ# 눜u5Vkt04.1'E:?n kFi~/ݬKcϥ:39h~;Q∏Og/Nj_nM=bafJ8lZG Ͳ$^z޹*Y|:DS݆S))@ R}\=[ZPO/(78mC$!qLjņC%v$O&!@^@aD0DJT[d f*6cߎ]%S<8d|(X'j&b?'ud dԹͿLL 5y{[)4[sߴ&^8_L}a#~g GN";˘ک7nSNu+Mڑq;vD7:}Y/A4iʴe@{[_}~}k?'px;c;G`/ )>KK|?<"G(NV{\ksc{*Ņl}Yؾ?mGfYHʜb[^VK{or2G}:M-%nm7G𔭰Xv_Np.D#Tcɏ&N'=H~VLoh-^};J#25Z^ƺ#*B/ijI4BZ3!J) -oXaFף[Yj6xۼzWiY%XZoit7 Nƨ̭VBL$Nޖ :/U+?P>&3?xك3:d"8ZO<).ɓױ*r:w~NUzp>@bٔch}|GI~,H7,YTL?9V|AU*-]m1uUT@,{梶~>1i~[XWQ _dY.JC_h,J?e# uM1D=[ۿqM]?%K24A 2!i?@ED|FBl5P.1ll=Vs;<~|w{ŋܫhj[iqґK@Ióm!Wݴ{%rU?tgSf:j?)7 'uDMDQP DEJn )׾٩&G%%eǶ,Dت]Tgkl_[b6u*xub"O EmV]epڍ/Jvi,]3|: ctur"ufnR*6vz78@7;-^_Y%Zf"S\2{H~l^VE8pȸ5ARA/(.=to;[[͆lvxY^1%S=S?SVy'7#KֺI,hy^cïwRعU/6^)D76+ L"x|?!x)Br}fxw{}N[{k@C;@@9(1@>h6糧N*/r(\gP Xż< Ŧ:X3Y2lY80, DT~.t慙K#<:w|e1U5be^LNހ̮݄h%CԱ $ T7_|?ib<"'BmV(K m=bJiVds"pnA8i] VVw?iK͝ޔ1t0 8a޽ׂ}c1XM@Zesq?] kpMm)gd& n{t7z׮Mx5$M1 T/פNo{HD8u\b`E|!r8+18U|!C(T0Yг}V%A<MqؽLD>FOd? [{tn׺#x4; !OČxqӒ[x4ysK}eҾ?s~(ZoQO-J_ V+I"w` D zI,c!w ST,Lp6(:m#{^fiJj9+xщ'ʁlR2BߦtD1U_awXo o"[fz63I ($BKXqk"*]?LxSuNP PB2FIKBD%Ep˷^#nُ}yK+_FZgjmi-7O!AvMjqdFj^:~>4!di:Ċ #5M )cb@;3xp3={C MQ[ L{;z⾡¥0iۭ.U &r^2[llhcfn6^TDm#v( 8)B'd s B*T\j1UF^s^#-jxwF*tAq[ Dc&JeS(<+_*#Jb՜Ǝн3q-EYJKW2VX5kz-'W?cϴ]iO;`$~s;j eT5چ\?l،M}9 H@BoX.ehn7(XۋwDrH\J y1PVHD[Ϣh-8mYt=jx{}5a-=PEupQ_=|sⓣ~ Xn')YHtdMiʂd#1&2ϖjSK/|̚AG3Z?*,jZoޛ`w, ,;NԴOA"1{m@=.Kpy-bp~i}u_-?c ڷR& ޙ57I1T ;&B{9X1WJRyƤﶷ[YO15i9c]X/ǹǾrkU*vkhTxl2v1s$;xkϷjܺF/JC=V39—㽃:x=/"dOI'Z"%)dT߉"1$˭heH<`ceVA=u ]orG5@!] qEfE婞Ws.~87->w9$EN䢛aD ]vmҕf?DrdyE 8d:c''~41``$95BԏLdXR\çzZK{Qa+n@NV 0v{WȟPBeՖzD񌤻UG"|2{?+^0Œqdah!Otc4uH^O||55O:{[ϧ3)cwK_İ#O:1Wl9 ]9p E!d7 lO"ql`>$5)+n͍rQr%IK۪014p5fUK:W-ZWQv&~5- `:rs_̹k]KxhG?)lM3דD v$fwY#ye@hliq1u1iIgDnu噵B]z-,t= FHyK~TnQ4HBܕ]ƔQŞlXby12\TSg1 y cw*IopŵY<),:,G73}k3yܢ:!:Jf|.ɋꕛ3qDWh9%gUػp{GOƤ%jGw|p&^r_ɪFH\ȶ7bDJ)yzV,g},dAt:DG(IC @ɇ JHVH ZF.Gۉy"Ve7.1 1( &m8o5k&=w9QunB>@J]F@FwgS"`#sF:/V*v{1Ja#qYs3!ryfqjrp>ȓ?=]HmP!n<9B&8i]`=ï0o|?R:tٽsγLɛ#=I9J\iJqʯm\[0g$|lsK)mQ)SR9\ pEz`t3>&? \frz[ LCC|XkϾ[iгCATZI'n1?;u?U)RVfu[Wu=g_:cԗe6J_߅cc?MO^&hg| K%ej9w0vQ^Jޫwܣzԑ=|;>iLR*yyأyO'FpKQ*+ŒqBQ`{NW25i-搧\vsD*l'[[=c~O=kssj|ڧ_05Wi_޶=t>HYP_9[(<?bjۼ{^;#7'⦘^AW{Q$s kٛꖾ0 `i,k׻rj/~z}hNkϺOVE67hTH TrTL~ꯗ}qQgV>{:W0q=!㛓6#肆vױ>`@aS"hRfaK']YNny|CXBubކ\aqcܟV=  DAۣoh&HF1q).:F~X_YG2<y4-},b6IE0) ":Ȗ_u3 ՞$owSiEB !H@'~x5/s8nC=߻|<+ڗ~g*_Q[\=އ²QK9~o!k)5uuziou6iyk ߮%(a42 ϟ%H7#曠uKz,1<ω qwe,$ۂs hkGtU={_XF|f4ϯe~+Dզaxj}1%NC+-i)6*Ua 7Y)~ڏr.NlgmKc}+WP[j. 
y-yZ=>o*QiiE-8;w*bA/QmNKq9c/ܰq؅ɑ,z?ɪ+>o_)7&$ҁ?j<Ѣ)sh?^AomM!σOVVr`x5HOɝQùdO~yR],~(o  J+@_dX{K.Fd(c4IÎghSW[ 9v׆uQ(;9' νt&X$NVs1٩(*CZg`b=$Y np` hohAOYPk ˣӴ0߈y?ê8÷yIܩ}Ku+ihH;?֮R­Yo:P詃aPD$˪hx|ފ9m6le6 d3٣]5i]w4 A!;0mgR؁75z8_m-AzHp'0M(_Q~,j=VRhUM46{.i[b*֫莣9RtB@C8j@P+h5߽`4(;P@C̎jO%pߴA]OG0D5 uǐt l Y "Wn;*M'q[7z T'm"SYw M)#]Vaլ9Ba7?B1{; a _S;4foJ$Jȹ_Vka펴(n!^lI9clKlR-IYLpx;'}⴯DDQ!Zޔi˯3iGlWxkK^T,vROAR(^%ySԊHYXY(pf"E.äeHif嬇ǎQ, R("eK{\+`@ ;7V"{3sH{Χ$g̈́\_9*Uģx,k[Gx!̐ ѓP^Heb ܬpcD$&pJVN %عʴmBI}B߲VʍgKHa* dˬ"y@2,I[ l[1tK|xX$Ք'NwPAȼ:0㠥_4Z` .[e+ZwqbOfU'#ܢxҾђY7)hyۤ YkF&q^Jx8^rtgl|+Q* gӴ6_~~|mZn^cW~gr"vV]/kw)¼Gu-tú>C<\Hb{y:d-5RLتeF]EC]l{DXwXkj wu4'IH ;2469yA#+W㢪(qcAf}$smKX?xFaX^{xG('T^VW vyuG*\},_w 9ZcTx6t%EWT0La~oإB1ʼnW*c}LEnI Ws&˗\pJzq_PlWJD,Bʠra;.Nugl0 {{'S6aN׍mR%⩥e +V  ''+Lia +C8p/m~IW.ԣ2n)틥/ddD @hyk>t$jǶ"B rvgZiXÞiO}Ή+R)_5s]oY~'Z-yD$xK]:~z+wHV55)1n}K_EҭAL* N:)|J6n&,F6wHWG;ȱANÒ%qHQqi獇hnUl1uHuCS.Wr_@ڹVEj9˵,=q/VޛL_lRRiy(e8@Yf^xT+;R> )ew0[)FE'=Ut S RnF B B38=`es,<ūf{.y$"rFL\ƺ|}Qvш|uUXg#ŇAs]~4SD*-* l4;Miɨ)TBnsz4*>]KaTD(viޥ{g)|t&ӗ6BsDj~K#;_KGqMGr(ԯ8\+ mRwKܲp4(͊xBk 8mc7!ItEB G?NM_2\*7,+m,pz Ш%~]^ I푢J*Du wsh;nȤZ3zB8>-”`|053=Cm8rD{SB~=BױH;|Ckv{mKZ]!D@zҘ(Ix 5sz <d_ a&\"7.~ +eq\ٕolevMnn"Jl${-9}cH ɋPs|{dn;Hi :pE:[iɤVbD"=QLӝ41hIGC9E7 .nS‹ā"OЄA1F<]i~nxÜ->c{E"qzAO;wOA[݃v|łUϪҧ]<{nR]!>d mIFY alm`# ?KI˦y5yh\1V`~D~ 6 u{6_ͦbRቔGo|wdHs6Z;̞߮3c勮fv0Y],c!*. ֐h$[bKisCOi16ur)[j ɣUuJCRZidbMvWp.( N^;EuƄDb^V z-S DZ֡dubQ3=@,l >ɈU4g9 $n(x !RקD`)nE`lz9RSn!lrgŪ`ίṟYӢ9[8t5)ݧ0~촟q@J\!ᨖP&oAObdJ^R^U#$T D 1jv}sNzO^Bĉ/%A)Q4b{!Cqe+y<钧;C*6AॷZrF/w:dI>P}̂n0 fp+H#ݪ".Igr\7M0 8pqkXi=i֡3ɼRΫ&^) ߥY8t %n^GE" ks?Qu0ƨV/ v!"un8cߐBn0ן6-9G6F>ӣ`ZJf8a<$2#p| Xֻ"UVR]Eu]8R$9[S%ēb+' wwD5$4J3(R3fa"-񇿜aul2 W-~i\e*|EtL w Á(b#ݼ5l(Ny?p$"nS5ǨrmFV/bvSLZW&rO `c߷{?Nǭ~?c6NBȗ0dpA*G +[dnE $7Yad lO' J8J'D%^GN}I">C 'e/Dr-kSp *"KWnAH\%U FGh\a~Ƌ'EC9^89h'c!Eд`pȬ wa$[P~y}_4[/LS(ZT¶vމWW᪲u0(2'5?+pBQ3ŵN2X\۞v[F*ۋK^褐.#wqY7SCٵJdC~S}xj ?c[:+9\-P3P5:o¿e̡N5 ZD#Eq?T,'-QXgj"쳵}M%8@*oC% pSPﮜs08Hc&f!.AL'#­fvCRQH@[NH"7ֲ`R*3,7%^ŢUuO޷ RE(Ȗmҿ}_Kj ~揔zKXE(W{lyf3*FRX_` "0jL`*~7]l[}tosm~E;/L%Cpa7A,! w~ a296\qz^15N, P}iF$ XntAa}J(BN]Z~i:[ wɄ?[LGhK홆 MF וW?gR{ GUD>>uH-[_#"fr&66M*!Bv_f83-Q)_ 4I%g(Aڳ ɠa2a0Ɓ.Yԯl7 ꉨCZuKDEտם{ AC4{s-QxyD6Sv*-C殣ٯ~o˜V~וXuqKo1&1 V;+`yzU}}9oǬ74wOv)`/bƯpg0SD43_Jyj%LqiT:+bô^,6>'R*ftP|r'j+> bP֪;BszX$1+&'Hl"$ϵY73xT.8_?b'X3dRYIiU X;+ue f}yvUL$[6jR08<(%}!B"UY.h~Q囁s?J|ʂG"Th@hYIO>gwC$.ͦ;}U/ ˷7km`%{uIy4)YFoMR.+19έ/[uZWoԍw/Jr]b_pٍ2ܒcM,j XLm%SWVLYt!S J&XZ᫠Nt >!0\'SgzC1̢S.?o)HhIbs_#pbxG.\F@@ҝ@P:oȞJzԀ0Űe%| @W1 kS#| f4-nN >+!<]ջ|ɭ;l^)ݟv8t sP{D?i*raꐐq|u ?n049EQRw(IaAQ'U M42NA3"*O![Oc'sTDsIR|xn3$!g~gꉵCe8f6[t.Izu3gQ @tI٪>>9b7)xiP(aFOq۞!Z{HPo~r.%$vΞ?4`47oۇO;6PCWE'oxǎZUvhI fUO%L͕ZGf7KtFG ZcaAg3j$G۱g_{b#D+Fh*SFF6ԞZ-/о|.,x0?;FAJc;(Uh[>h}Ar#a G\pj i4eWL@ ̌iji1O(-,"7 x}$KC_kKWL dtc^- ݔ}1TۄGqn vjnErmY}tdS ZgYO5rL%[=*#\tmZDZC&hFi^0oE;\$ cy;u87>n[_CW26i`sX z9.l`)Dk-^tHÕkF6pE#ŏ4rLNM-#ޥF!2+=LpS"'q>flSXly|nj:^AIHd}mnVp{&az U~d4Nٵןy_& pH&TgHuP >;/ӞY Cv{)^̈́`A& P79uNBy<(u)ZBS#D}zu4Qx1{>FڄZSuv-sd„ ,*(k ߠJN=<< 2LPiP[L"BB]jr|I0' {Np[F+KzXss~W:" 'Xj%OgE븎8[+ϜA#U-R<PT܏WNԠXb.f'?lKBN e:cF<Ʋm;A+w8li!!@uTG2̈́/ܩozv闡`sWzJ.ixX|Ma*?Ԝ`d~]s+*0C@C["U> J'3;s%OUai1#o|O] sӯd"z>26HwSu>#*%>˞TaSJ_]?wF΅m!>(:(Չ{˙7UEn¶@Ap͈sB2Od;S+ݢ h`v*GpYjubtg+|ŧkF ˇE2t?xiz$"p'?7sQWK )qlY(8=G8Q#_Iq Oy/d^h`c9@[Fg8H[CtL$9l? 
ZE*%ѼG/]x'uu֨&/+"~)IO.<*r;%:\}A9*oJ@# +rvYVu[Y#U]bJ^cjWVc3e GlCc^5iMa~RքN< \PXF->AGюtHŌ)B#d?~2'<)P{%~}Si\ӛ=a 'xk{fC6ާ?aQ\r~(ު\꿖AwVF><.ٖK('2bJ~uH\dc#"oOGe;\Ҋ8nci1MMm iZzwETS˖&ЃLg})k0rCzgb(E>Ɵ/|“MqXGtlQW ?9/Ꞥn QD<ʐ-3A0eѤ4|"gj:ϮF86 :SөeP[\~ [//󠔗͔`b,nr(owH/Y* 9e׺h˪ "zNZ1bЬ^*.ܿjOZjG&TEx3vU1y^f[_3ٶh1ڜ ?7c[6sW?&2˼(m7*υS?9]A'g/Wa{Qߠ}@0ǃx2[{fkL{8 Zyc=A/Dx3ӐsjnI, \(^K|* c,j3{e+"?GeoǏ!Er`:UP?3AI$!RN:ȷtzb nzw#kȧ0CQ;@Q~8}c_m a7TlZ0PkR=E_3Q'_Ae?޳9w6]ʠCrw( Ɖ} 8%-vD%QEО@@mHT!v;:[w AHcEⶼ SUBjuNKjr<?:f5} {dn(흺Q{ @59,@" >QVw (^m>\c`ZSt |Sz30CSf?XjvGUHMW禌I/6~ùewtUpH!1 STv5i&E%TĪRx'ǝgit,}M种Qe:V^L*͌9u=SgfsX:ܫ&m~r^V->ߺACs@6@88f ]K 2bD>>|M%*_5gxe"*%ᠲq_ {kmDÉ,i-C]tt5}zK{{}\gjiixPq6̓ 2r&iKs)pJ>g2 'b@_SS8Қ`q~<,ÆT#eeNwT ov@<\ö'#b=_؝v'wx^nvu4DUe!'p*zOC[?VxJR3DH9R2^NF e)J64IjPմev 180rrُ);1Q. ^̕7,LӉ+gfG2e<)Z iƫ$ܼ˷ǥv\.iQw&~GhTbwhtT8q)®aEeC8I&E#ugcuz_zYfʯn#8 f~ 6s*ÑD&?|/=.iRӱ?+ei3*a\{Lt`E6s'/co$F5G]BZ7)eiW.2>& xьS;! Z!KMR5=cdi3ʏQN@v[$fXqK;\ [#cdl 8uԿ$WPl<Qv͹jp9.xD Ó3PAuڨC~[?AiQ =@!3ӢIE6 hBC {[jF!^^wag[zXm4NbQA{д\zG*guG;ԝW:Ǜis%g.$%'y2mSɅޭ$+4PK @-OMiUΌ.i|mQs_ er8ĺ*z,ΣǾMĨΪEA,H'~T|̭7G%f*b_%a*>? e@l@ Qz9o/Xr!ȣ ؈Dd ,.DFL /ГH u#üzBsVe4HumM||WTo+Dnuz]@4M#ZH%K . >TL3h^:85FW%zodwH\'nA7ĕޓWK: pҹңlsƍy,“xg7Зtbߟ+#j=Sus=YI_z:1l |i=7B^ˀ<,y% d6%?)#x\@naݱicK _>|` ib:{ky,)zz+P}Kzߪ >WJ\HPRRgAH?K"UJso54]N{O(|TX@9ŃC1aHg9/OjuO g=)D$/2E0헡/Zbas ]{-gnxXj1DWrF[k @57o[8h0`8MfdbE;yQ@ MB9}ΚNnω.'='#=բBضVtLV}F[FAS)tFa|ń6 fo )>^[bۚ)#k=cjQjj!trJ4yLjKx\?ZOQS먏z~L1a71xRj< 0d)Uh[QO[>Dt?H!&B#L]h2H?͘X*OҦѽ+|gedoN7]ޖ@eQ✅N.iz%m$DYLN]XR` N%BCo1n< z<]E!ɁFr*._\| $G;ݗ!au0a1~$Rqj{þݜ20G+x't V1o+3 ytȡRf\Yu_sܯ?3Ah#@-:A`2mXv,1)0}UWFQ 0; {aZ.rRu1홪Y<_ w}Ut;M܍F犏[ 3A4,algfsPg{1 |;6'dܽh8}J-6 /9n 2g?yxLF8m+Ncf )<&D' yT*SpgstMwk."ڟkra䘎=L2Y-j}P0D嘺wg>> q]Z]p.Mڲc)$"Msse*pUKh֒w.-*2E#q̪x8hR.{a4;E$ޝZ3 TOaI?zv>uN6FGds}oKZr`MgHmyB -'4]Rr5Qz2rĄIӉ8FC:Av(ʿ`P!'A?ͻk@j1VdN/}fE<1 ewEiO@sk 7Ӳĩ:1B5UsABI_zPenؾd*x%.уyPf6Zÿn:9xE49,b}^:cٸuP|L Xo;Cu?BQLt8!զ,7΋5pmp˘&F͂fDA1 ٗ_g.@~\_z.v^:|:]'qR̡$pl'տdmJS 7\>޿8.xU={o'_w.;&U#/'+ B0ft7%ְLl;\E/\8[Tк_3\vo. V鰳7Vk"T/|dr)?g5 l.LLwq x1I)JBfK, dL|Ev@ OjTq=^N8Mc969P}@v, #;ɶ6t3;^ND.ŗ7S a1AG;xy:O.h!|dtv3EL,~3;..UsKCC̃yNcqL0cfL)o[6'$ҟ(MюsZOձqaWޙWsK= $ ESLgC~+^mvOd_5 Uh'g5SP_s8K;>reT1+' ea b(GX:A^ 'QC(V}>{D~W}c e1:* ;VqL,oJ="B9'Z/އ,|4bK CvC1Eh !Rp[<fU/F7K޾] BK^EXOiSRS&ť`/D.8$ +/olԨ{_S{qeQ`\l `ݺ@0p_7쌾# ]'2ӡ5{pK:mo/y $ \0L&˾Ɩr@xY_faebQ :ϫ輬'W#C=G` Z**_e Rd nC:0~7V|7'zӑC~ಷ+jkz-x[6L1y,=K< YcRܢqj *L΂@^V@ãS9ی>F T$Q7 D@E@ =v%|K% e 5B&K##,aG\ zaEXsLX)KV&+!u O(0bĕ'G 2`l}#x5@KY0aJD [Gh뇜MωB-Ϻ'*a ĜK>+*<ҏZ9M̃z/gâ݇$= I[^3,ȩMe<$yNUv@Xj.=Sz̝{ՂjTlU:?H8 (|}L =uLJ j}%q]&iڭj6hA OM@ZSdJSi=uviT~,!{8ޭ"r5$8JF2h{ 5|֘Q6 :$ˈ#̖N\XKNxγBKUZiڐ|v5q7T܉8I'ЄCԠ#f9Aӊ R\Zcӻ43ID$%* rۏ1jHuL" E/va=DKB܄::<,2/L'.0 z@c0( W 9 MS7`ߝW84˩+) P"8^P@FQ0A®}slx+bhXt+[pxg!<>jqƜV?Mdm^Jhب2vgXWpm{6^γdyfT-~8f4S-/;4^.붩j2p=\!#S|"8m>gL&'6VqOJOV1.k[;2l3:Cs1O̚aIw!_c E)=[a&1;Ǘ][h?Sq;v%]N2ȍ-I Z^4a`yt攃Y=! 
KuH- ;&:仒Oe7Rڧ؜U.A&(^e\\)LU):Xfet"5ݚ߷N{^?oZԢ.y Qι}?,Øi84)3[K7e(qboE(者0à\h%q-Ռ硓xw @W&5uu}ИjQ`L#E%"I8^N݊OaSG7c診ï,&9P {}ϣO T%|o2R4&!R|}%ͨQ7dGCGtJ?M8L:u0f.]I2H6y0#ǡq7<_mqcAV b׋Cv@-DXc hSMl?k @[2TFwG>lp)8AA1U&n}N!ou B%Q-tQP43ҁdӔlGz`NxtLy8u:`vۦO(,17Cɧhv(Y]9&.3pꤸ^[ ) FAeN/!~kayiL9zEOeb.Yxg)ڻlt&0%,f,!nW^)oRh!zTͺ߿&(ө?4f=i6R.]En' tG ڰ^pp<ȸ+xpͫWBNI) 4znΰ2@4%}e׫IpvGOJyAҨ12IBe'r>upiw۶k {|F(@'l(@L}`Z<;iͅ&;a'=0k8ȡy_`+Cij./{š/Z R"HQײ95&W|;v%VF ];9mvƼ}Osf dEVUtM+5gMf&Hئ(tx˒٥RY Z僂,ˆB^w]鰐S~|;z-`RK}]ړ3]Lj$YX޿!4vI;ѯ$1f:]⻎7y~y?1ofHdС WJMȠ+K`:1S쾨$s8a] gPRT^ή ׆ QMCnDH0l:$⯩cd/qj(u -Yw %LV~^1 osl[+J65Lr3Ei5{>!0P vfHOɸڝ5ߣm]T>mm y~9}<u\:@2Ӎ"4)MմFv}R )?CyER2?IODhrIsefk6Z8wYMnmg|Z,/NQJ\"Y_ AP@N@Tjw3]>^?'P!+3WvQ&`l Q !hFM\QQ[O{JpP.G1šфXo4"s/y&!?/O$&(dLOHO bBou8KTm6\ =RL91cg x {EoeAUP2tW?\};XBHhU,0evbMm5,-BL?R4.z{:" <".яwNqpBK.@@[3 h&)ˣEȩZWZ9x8 ^yZ{.QlCRxyŨ(޲#cq>OM>{&Փ~0L 0̢L$-o-//NͺvF r`n~9NIw)chd861<J3!F`P AvB[>7oʚObȶ#OW_޷ѿMN5+#Xpp=WwJi?!ccO1qP l]EDfE?@jj/< Od,ZX3x,D>>$$Ѯ* ׸0T(Y==J f{f|*D~YzS>EeƧ!Y餺&3(x5Ğ̗GDQ&f{WKҽXU?f}%XO_DqlB ]Gr -S=,'@h62[bF^@#nJ] gvÕAoFNL 1Yۍ'JJOؗ,F6VpO@"%ҽ?V @?8LA]QA~?_5شh| w|F"* 3yx-F䆦6#0XIL*Üқhrs05%^弟 GOA7;(O)t B@I T@7"BŐLPCp}\*n(BN"/ DA,EDJLDQHL5D-2S%ETDT1DAUE1% 4 Q,BJ@ %)!UA;@) \@dC˂Z '9C* C!PR*YĻ UXR "Ef@J F$T(JT&J)*QD!E~QP@4R4!Cj*V&JV"DbQq$E\K%BI$P ąH 0@1*@BIrd hABLS$L!EiAP)AvP6kC/wd6.DLoCa{ٳI˜q EnVr(u~nS]VO3īX Hr>ggQ }1-#ai@2⫣:4&DK&kpqT/.:gN (cEw&U]Npż:;KGԏURq|n_4~qnsX?M%)Q Q$72hs~zZ!JdCνCٺǖ]ԯy \TTóWfAZ:_s/7&&塓Cn:|^K_q6\{‚[}s.7SDpnPg2poq{e̵÷6܀܋: )SƇkPLB9P aB_KD4o*~`n] 9\9[^ !ڈ+CIFdYi6Z3|i4I~}une`s<y2+<3r-WGcU[>U7f#W*`oM۽MS&bpcϒgno+bq?勧 <.OwɿynRC: NdC콝efgxxd+ ѱ3Qviפ S%PjYߡSu M{Oe, ȗΖkgrpu+רxY|$?B(ӶĄ Ɩpt3^;K#bCt+KR7= 1FleOoGowx 1Hm"f*'Goob ?0|,|h=XX$\32 2>s"p.WR\URQ$ go| Z'=`! ĖnFd8B2iP& G%P.N']vC8(4C{,x7G* ͧ)h1nNb.KɜyzVcJʙB I:l?8Qo6Qh<ENVgNEϱÑߺ?^2IĨ'(yawT)q/%vKE=x.޿b˻1c{FhsZw:5_0_Kd4Ut7B7wb wYJa rFE^3p9D,3ۑu ׮8qؾ:k>ckOI No֝MR] =gIe$U_{*jɷbNW|zqșoDEB ۾)dK>>ƌ'!M&{֞UjglI5 H (+tẚ'PPwAvG>ں]Z99w|T;e/f9\Gc>c?9伙J T#UUdMSy3]>9|O/O>T>3?j?GP uj0U%FF8a;?8>DXq>gԏ?uKUS!*G&t:ck:_wyi]?lfEMTC>u3?ǯK>>~?9s|Wو*fg̭bɪC#'8$H>RkVt| \mtV9}|<tvLĕe+q{dg_TA,&L_7 qmecG/֕d iv dyUQ\*ث*ޔ_B+~9y `#\U V82._hn¸h~:.͒/:3uKXyEDES܋ M>n`;6v#˛`_ 7ON;ڝ/ @l~V}Nw.T*VxՄMVJsH㛞˭m&kuW[>(ٯYzhgؐVΊpOY;NҢZw`@hOĸڻH{$voxTA61<什ۅA8!Xyq-A]N4FLI TOd4T>rz|AЎ37O;fZZ 0(њb@{~!ؕ-ƂiUIq_pPQr[Gm^`~c7G:4ls iDXXoB;)Ilܵ ?$B'x_κtE F7MF* R͢5[]'ܞ=-vhh8l=#b&?;e,_B^dNi66U f P Ր2> "\M[O4 S\l[[(u(?=KI3'x7nixMR{Nth0z0{إDΜQRED!{'qN A ؆fp{ #&J :K&r(`%qZ 7 ds(zGIDwe>sZIAt[t;¾iiXq\&6Npidofsua<1vu P^/]~/)a]AO).)mGO0(<%nO_{oNOϲ`?<^V!S>+<$H?|OoZܷ38 K' kBM)% ۃ'vը߯KjO.Ȧ3`fH2cz(ϰ<_Ztp*|oG{MOUrRú` WvLF9Ja#U96õE˃cfP`'.1"c 9Ag@ü1'U]A]-Cf]!MpZyM̢jT@~C IA u 'wE=r}^GC3~#5 IDu/(B t<:Ʊ A+rzY\3DBQ($!T Ȃ(_:o'D$fzsQ-%KD8€> ݿ϶o l;Z>kqaCRBl;_yQm!Ԋd@N645d3:q.9ɕ5OޯE@0>$&%i pI혤=)]i"9qF,yWTɯ_U8vSIˎ|(BmǶ.FԹ}8lr9fDawIhEbݢBZ[rRu29$_UaIY\%XR1zMn]9< x[.ԗ}Y=ߴA)=ٚ16rĪ%U5 k,cb_WysWO~l.sՂ[j)p$;}UۆZ;WG>+#cT2^7kuu|K{xL$I uat+فD޼. 
oFNY ]6U[lԒFG,8++'.'j.-i|ԧ \wIǴ/fkilT*6=OLl Z84'" H jD u 8mpF$Lʁ3DT/*kC~zNZthqdӵARooHj%I(M(UDɏꥑ(Ь?Q[qԅ5_4lĹxk3k~uJKVF)TjO?ld#v{uvJ?OjwQI%x uMߟH׊hܩ3ge=-\@B ̏E=Fqez$ WFJbA-I526ɯR]m*z"_iPZ)T0 d)Ep  !4DDD&#h %t0 HzoC3J [C}-ՐTqL3@CC89vzI߱..nKBQ٭k{ ʪu7o{blS{p]s(aPa(~6u ;/@yOc`Wtp9z2l~[YOă9ß9Ë={G8PBY&wsi:xuL7}Q\%F6a9=aȌ i+I{TMH@$E?&@|gI*+8!Kjfm^>fѓ6` *ٰ9Q8{t{KDb^`w"gB hH<9$+#(_57Ep~ W{i폟%f,?QG0vR5yf͵S|;7%~2>Sx"|Y:asQ5!{'ARAUSDއCA "p5~X;.%.r:=tAD‹AdvAVy|Uy؃x<^R:dOphA o"r ZD$X<}2V(aw  ֵTLbRc2jjM& "dQTbb(ؙC2"ұE llPei2lUeDҡy4 A'҃ 8uĈ[A x`|ǩy ;dL^WjN:# D"lDWT;E215"c5T2 ii x0ހ܃Q[AUvyNz`Ap{pd:US|v2^Շ{@(X13Ճx uAhuAur ;9ȃh9ă&p ACx2tsJ؃ & p<' 6AQ|UG{&%:&`x(sЕl 1 05"hL̠Uh %P [7ޫHckVjՈeYC3ԉb &!eG٢c Y64Xc N#P4i5Ub' ĉx; OOgp&q6 EE{Hu(z3=BD7 Hn~| | {gDA΃t H> .2 AA 218{sA9H>|["w=?4 Q]ޱwI;=ͤN[AnA9; |ܮټkϘacakPPu q"kPd Q:`:eIw3Q͌eESdb~dYMEөk d .e2+3i[UY6jLSS*֫NՒ4K2ҥYebۄ86,,ϗhֲ ˓4dd̑̓(k3VfUd@64iU+QA VSjU\iJpcLFjRʮ3ɭfekY 략٦esf6bi1 k=jc[4LɚiV "o_`Jh&0HPjf]ة ff:$N l9z *mYdkY5k8e%;E3VY $U%$R@Gd Q.f$[ ) PUvJJKT:`tCf}ٌiz\pnkO睝.Olv< mݳ_fhk4quǻ|tcc֝qu]ynWn]pݎZkNn;<;kg-uܜyINg=q> DQ{S@¬ڊ]oh0jfb+',*`2Aݨ2%_4lkL3Lf3< v]vٶlmmg2ff>G]mR~œLɁ7*ki-̶٫XMTIʃ[uvvtpkNױ~2dxʭP`֡ݶ'pEu9=JeDxM C(vPއP=Jb^HN9w-f/[uʏ.qwGPCuEq%~]~zw~sz\ ~_..W'4c15-E-9Eh E.~My)ʯC!:0墶$Mj5jO#Yƨ}ZG]>'K2PfoP C; ^՝Z}LRSX ߐM#uxݿM}WH {/ P]>m|?|wN󽝯Oʠ]6}YqOU*9:ϣQڎ Mt9%Ukx]'Q[TC؟YQ]O /$dHP:'Ce[2TL1!(>Yiz izT%&2I>W'p֝L|O5|MQ~c˘˕KKf`M,H,UzJzzъt7'dN*'Ci(4LMLO^M))IZLJKUY"h q"~7o#vپr?},d.9hޠTqꌓvEuSo:kd]nçV/t^f)Ê>}!ifc&~Q8\YiC&2ݼkrvpy27ZCyrJ꡺TmҜ]ΆuD푺zSƊ\ȞH LTW&vHŕTLMԋX9]O#cW8?1!r坠иW -w^ >Li=Grx .P_އC6gNګVUhMxmf[4nê1EŧIo01rNv?J. 8:H; ~U^d3޺pm.&ݍi ȳ_"- 2)%L Șʪ7cv*.wiZ]NaQiT=;ZPd L 2 8x"yzHvPfiͶMÇ31Ua Ѿl-Wǔo=q"AfPYUkf'Z#*;-^%[zl ;![2Jk=wy^tz"j8nYƍZU_"/VҐG͹:ԡynz\SrNoV, XU`e;`pZ3X(ߔP*NUM cYɗozU?GV=du-pCNLSؘ3wUzY B=Du ^OxTz[U:7e':1IJؿEP!K ̡T2-b60̏gR&D55PTZQ1jCߡXc.MZ1歺.C;iJ$Fa=TU&Ywi眨m oQSZwE oS_;#~#5*fL X-#ċnw3V&ӡ']3j+U5T쮾jiT$6Q4#r_iCzC"n *1&ƙ \U*:&VD6<ւ"6E!X`;<㊽zg{wxX&9ECʯw gS tJ|EwΞ*uF8*6=9)|Z<-&fod13wM:1ui=fv%UC=^/=7Y,`SFvˡ_Fdu:\uqӿtt5WJKUæp5SQY=1.vDQҏU"w9ޝϺtzdC̃ sH0` 0` 0` 0` *R[TEic1aײJh8.ݪ/yfy2G}78{knKW UJ"?;!wI_gQǢO[1u61pAƺ5AaAd5"d͐oUƷ66w|&~W;tL4F͹rˋ6Nu9l!8\66*vs#.q9oN6P!eC+49\ɓ6Doq^(ޡc,Mdb@ij֌nH8ǝ!әO;.T~)Ewm<{Nt7dm:{P۟˞4~Y1t~Cxq.{PT>O~rA~ 'UYU:P qqpTy#IMʪ?%D/~2e r'J.A?.AQ:SAΫd`2.' i' 3HU3#jdbLQpܖX:# wJ0"Uq0UmX{cM&13eLU$ni0bjMAeI&Hbb ZZjRF2FXAd2 "%0NOTQMiLd[+*Rf)K|Ui1Ha,I,ʊdAdA0$*ni2-&&*`\J X`YeAHV023!VbEH (rxJH AAȃBP~IP}_} >ϺW''=|?]CeZ*ńUomۜ~_Ÿ};K_e ls%?Zd86vG<ȯddyvt֖Н.t<?;x_ w$OOO󛝷h~~}D~o}۟| 7zi杮zbv?g㸄9?A5LqN4OI4ᇤ?m;v׺ Ͷ롐_%̿Auf-2{ Sjaz`qOZݑ?"} ȟݑ;$N06 AdAd96 :dN;֑?]d 4Pb PrTVPTp L7iu_n'2 ~?7w'T=_7%=OOOiҟe0KX+O'Ox^WmYA?c쿮nb_M?Po|{ϲ=՞wlէ\ۿՌS~pO?ž@l}/_U}Ɲ6LlM=6t]8Q1=ӵ9ѳ׍g/}ߙb->biWa4~wSt黎O14}9& Q( #(rIjaæ[¦nYj@7bb WWWr}m.%_%U r'~2DA?w1OH4r -|t\uJ*i]OB Cpe;hEQqCX!5Uh^҃ Tg뜃 `l3 ?}0A "|2Dw.up'brnA4ADJ LdMA ǬbDÅĉۿwO"5SDwSsޓu'Q$(Bٓ3!WAAQ\`{@n _% :2EwB q+pkoߎoᏠ˩>nSƝ.mXODv秆lOi7ZuagBu6xj}tGԜnLrcó8 ykA5fk^Bp-ZtZ]dֽ;iAg<}92?Og^j o˩ |/5EZFPZހA:"% n}KN}?Z_a:i/fk&`@ֵyyk[,9ssdI$IK֤I$I$I$Ilj$cIE? ̋ ccs\JQ'R3 }zR+!CQ1c#$Ç&p9`ͱ& %H.뮻 EzrD纡j$Ő ˼K YjV:VK[W!lwէ *┺*< pUNX _zy=U}_}ZJxtvQF\bP'`pu70) LMM 0Fi0`ShF 64 m=FѩH4zA" i@M 5Ё=A   T4dhF@b`4hFCF4zh &Fhhh4dɄh=LFS L 4Bii=eO'O#zi6D(ާڞH4ihQ@&! & -OeSSOOQ=zLz4=F=@ RD MCI## 54aOMj&i2e?B4i%6M~IzAhd4zn?C01b@B%S X0c ecmT Jx.9If77ٌɁl%d P'+_- Nz,bu4-vG L0`Z%pcKU55V0`a 1&ٱ&0`h( p٬1cfߘҷc&c1?10bc 00`L24, ~GGLz&0YE _5 qiaElm6 Nnb>!$ 1G .:10B10c0r`Cq40-+VF(1Ռߵv/ɀa0Ml6,RzѤ)-Ns,dgm,ڍ-;-#dm1EQ>@w9\,hUޫZLLA]m!!@`"XN޵Mơun`92MzS:*p_Is ,&b>.FQlq$21G#;G ;y]b]QERz|dZ#aX0d [x|7ԶK汜0N38 MJ )idQ (H[:(A ZZdA@l ))vURi@ ʉSmeJvǢ@PR*H6hMh UeQfTfzܜ}G\ZQsU-.RPi<PP@DID$! 
D"1%#, H B !Zʓ+4l.TiUd %U&I %X^, L +,D$!@ġ! #LQI2JIL-D L D , DJC0JR@PLJ   Щ~N B D=aA)@UWS*" O @AڬmJi0R*?>'PPP)$"LHl *H ;8(D> JRB$H RO(A' _bGzs|N`c9eXts{-+veSW-"z@<$^H9#W~P@T]JkZ6Ts 5G3cPo,@:3۴-vT/(3Dc04`Ɇ%Xc a)bR`a niYI#Ą@`S){ #Mł`z4濪ӡ7lӃeŌY*Ʉ0a17~D<`_4B' A i "Zn`1/ DA$DDv9Du8`bV#X 1 @@0$BBBD$$D$,$D,@D  0b` LL 0`0`hX0` 0```(t*mGyI6N`|BI3t$J$fV 6n[qjTh!Bګkn3g0[F,JQ@cNcGAp6{'gpm;§Hd8N d (HP`1s\Dza'x2XjjaC|ހ9#dMcjp6߉ Ѫ<G3&1iz&9 =M(&{? .2HŒTSbVz8fZrڱ0=!_Q0W&¤q޵*[e<`;`޳塢G2QpSr/sJdLZ<%tWA,<mxrnhx3c XQ4]q't*|0#^9 Z_qU=FvCo-ӏxZսKd>=8Xۻ6]6h(:YX+T]suԻ2T"w܏nkMtW1,xp2a Ƒ i70lC{v;a^(${Ljxm klQGT z6Nkվ2-Kd<A<ҏV9ُD|G> <rLv\c^ǘ; a:񁇿RtqGE|kl)n[zg g:kt9>XmcXdq063b^|6+ʇ4l.Gv?,h|qOzCXg 7o[[plߜ|;Atwt'.Fc|%Qq a "H*  `"&B RQVVTrER0ŕTo*)HIA̅z`LKN Ra8+ ̥&UXTq"*xNʤ͒Hj |c%J(jecqѨasgR&;p:Фsb f Hh!uz6gy0H$L6$]m|ha% g FtfIB37~ZΑ&Sxçc0%\e% gҟ Kx-rށ ٌgCW!h#ڤ=i6l+hhd=$2գGrCxtɔ`={(5Rׯ05GcQye :]C Ѣ3 -I4:X]ۺt% Q*nxJCD3!ϥKt9 b$f3% ]^VK/0tuf-[@K΀j~0DhftV9fܼ'(3uox.Eսx#ލR=IVU|3l[a0c6cGs0;lj2򛥢׭֨c3em7/cOW@x1n6L PT!*zc2NgE=$N%ezI01;AT (`8 AFPG0bPc  G0L `Xv2Elh'`P)LFcT1cÖF!8 H#S9jk_NԆqPl3is20{SOޣx/)iiiwV틫8tȬ}q菤?pQ/::@pQS8O탿z~coVJ#U*':CMDr /aƺ M:r(Q/d,nvӴ/|<'@ff ӾvsGAb4iaŇ3폦|*wjsN ±\,v:"hP[-Z+Q]xZKCQ3XՖ1~!phpa~H*1G1,(_h P6DIb*ըN̤5) y5jnѼkD煰_#@c:ǒ^PaG@khO(T*bJg3CP=J0oּhT.PJwS|+9@<'PySToGJXqPokpg޵ЏZEGkV7M<mqa3}!ގ0u^5t~4ݡ7.;#DBĒ=$XQ2\*5(nW{*6>fqؾ=ʶb6Ε9 EҥOC(0]=@bnHm [$T挂]M,,QxTh6Ň1wѺ9S5dĮ/vT8zVlI⨋ZPd0 ơX"Tp^g8*I>1bg Q YLgFP׮2M>٫|D/BѷnRPD3Y!RPPeV3()O'RtK5M"j)2ܣ(7.[F>RzIc/4h7.[z>K|->+{弇}XT>y.eߵOA玨j =5C~A8z#q{DJGV8çV.Q9司L9):b&'drT>;hn|2>H2%T GP;`4P.jMv5ޱxj4Q0fkٮ&pXn<g6(פQQfbf6XO~ \*h tx4Zt{7yᾣ E]NMdUɴs^ϗWWJͪ<=zQ((Ω܅KJhz'tңk7űEkf;6:L-0%k[7-my]VP!:Hю-9di63p4]K" a1QRsëHPǻfb1b(QeEDoFpS1Wl-Ңlggh" Nb*-$ҪXd#lh3k$VTr?tzbzF֪~ ul*~ l!u碱ʇ}G16tZ{c|/'iUHݰ ح$tj/ hY &+qTJq^c+8!C*J"F9 T葐2Xگ<bE`R1(Ԝtj(3njXN!,a eCV閉(k1dMkF&6"qVR#`z"9L6-#|&1t[{UV&hR=Ef@Q%V ]b;Fp9"Jب_^E1-1Am+ c`!n-cQ-l/PqjƐX@Q }Th 4B/td +8 P/ Wpf  l}Pi%䌦RضG(p(/_ $Tl='Ԗ Fcx45P7#eF6 ax\ c% K(tHz ‹(.ȨuCHt}09 2 1 q.cx-KCWgo>Ⱥ3^(?]u,m`6 1ZvC>˘4Tnٍ`A3PZ#/"ZXB b$'~p쥷΅·|38ܜV Iz*>@QzO-ˢ6)bBsR1EP/ժpR'.X-m glѦYIBmcerck6hh -I+݇fUųR6\̓L_qME-(*ZT+Au϶jҸ8a7cM 4?bEukO1_jt*<1mQSPڱRC pqi[ ОgAڢ  &T EO4Qڮ b|W~XMACM7 [ LWlGXuù ||sZ*PP}1ЌX{EA`ZG[x<1P HPKbaJ-((kG/ PP>8G pHfaX0SӘڔȪ|Ռ&p)& K"2L)2,X J`X LL001,idEhIP!EBb2$>|4 ̲vN1OaY d 뛘%bi9\U;% !!YX @R{b ?i(> RڞmoAn]qD.ź+\>{Epž,(G#$vrbAL{^A0^`T k=Uwq\LuZC20LDLLG `@9Pdq4=PȒCR6[esln_4K]Dzmdgml|=B7hC{Rh  EP-fܨ$CŽTqV_ԐFRk/|τ|#YW-cDZŏ<Hx}!D\XzBB{i)@d1dyƬq 3 24 aUh/#,K!Qʒ *ddq%R?t`A(=`cVR+XGtl}"eTĮ(m/5GGD05ot|+R: n$+Mm,cކ=tq;g_(gk5ZevA D@0v q@]2/a#[@IiH JsƓ̻c5ié?6pˤ`̃@L V,] SGڀmmLg@$$c c[.e 5g3nm3ۖ˛bTlD3dF I"* @ \~HXA5ރ ;֠34G9唏bb"D9CTdc;E,pg.0B!ඪ`\hwbLJrHlpQSK&CXL: RP (NIIyXC4 QB?O(à^$1cfh 7[8'w& hTK0=5=O̥mT/%U%(F@(e%&dSbF(bQ*0emY"2,5%0SLUa)VZjT2)o000S`JMlF̀4*2V&X)T] LaP&02#abUlI!j0e`ia-` iZ 1ĀSJ¨UjT0#};5|^pەX\nac6V^Mnt0tΞ=޽Pkܴt~ڂ1ThAŅXBE 5SCMEQoQJQ]RCG# T&&CepJmmV p2NZD]@hDIHS%%e"MոЫu,&UgeWgM=~sZol:4=A:G5-Sn8`S̓gN|mFys9zmÓ)I \Af!k0LApdƄ A =kiS @hЩaT]34ügdw6L-'@ FV4at;du#j^YAͮm۳gg{Ht]6Geik)7nգF*𿦑+nb?4m!v(ȈdH[eg0/<ҳɋ[0Ѓ V尾 fŴQ@!3%(tH&wA˪m.%Ŋ_f:Fg<Ҧ6wU=Q[Gn/> 4!cp翰\t7zNy4.ܣ>5Z6;vkˋvc .I}31>1`X(=vaҲǙ~lv&1Ix}n}2dƛ7s]A8'%4%/!fڡ39n.H^aKUٳ]#w l2Y|;ChP#;vZξߺ' ~Fhh8#s P@a CB*[C2:w^Y'Ee;YADqΦ"|mo Y1ﷅ b'3ĠnRƪ"qG{Iahv>C/c|8ý#miuL5A}U!/<ᙋF ͅ("w840͓'8LI8!ŴT#&QyrׯѣJ)I_EU{f'iR- lMDnIwާ.\UʝX6Չ6eE`bd1$\HqV9:N JF3ZZJER+MU41h&Eb I,*0izpg(tdA=AB'D0inTF(N3%J [5MC( t)S mal҆L1F)(*TDPk#Rn2ci4FT epN$U3r)dT).V* pSKA& `aF +P, T8M*¥"pXH\&IbL$X4 Wᘙ٢acfaH H 868pb BYf+',!w7s},# Br;~zIvPdNCJ:P pJ`Y 1"{7r@T{݅.RWAQ+cܧ! 
,aRʘT2,,*LVLa2J"C%YI3!TD "BBB+ Y+)Q儁"jUeeRKCae@abPŁL,YI0YK)EH(1&K%"ʬVK#c"2+D# V  !B( ( )B%"- PR MH5Y142U@Șcf"\S 1Ub!beV(a0YJV "`dI ,U10Op)j fj) *dž HV0c` 8(JQ1,-Ņ\b@JH0"A K1eXa#!Xŋ*2dʬHD%(I (! X%Pd H$*˜HiR$Ye`IAbbiAXA" d%hX"" Db0QXQbFP! CL0ȐA "URQB*.K !*!ii SJc`D(LLH,)3A0VXPTܱFYT2Lt9D!s0 4-!Kc*ȘXP>a0hUqª>\X*hJV"`2H$RFb &B$dH $"J2̲E# f [ )ъ̲>QW j/Z U_ 5o81~+ ndEMATDP8JB@Шƭ!)&"d$0Ȍ1Lf2 aDR?20R 0!@PIHBrDG0B*^V%11VJ1R3(K(2 &I),0Ae#BCEK&Jؒ#,LYQ12& a0ċ 4 Ę`b002)2D0bLXd`V$ʤ C*K Q L#R1 Q`b"L,aR`ȬQ * 1T0brĥ0a$H-%Af# Jc %T$F*E, SR,JV"2)Y"`e+!S`0Y%ʘE`YJIX"`łC+ ```+ `XV X,Ad+$0QB bb `Ɩ+V Jɬ { Q2(p@RTaIH @IH)EY`&D BVY$AVaKyʿNB0L*e*#2&EYVd `Pf&)+ c"3"K0B§:; AE&PCL L,C KJT݈s)2|!,e+4-2$2L00Æ0̘㳤] EXbj;:z)ԥyq:t\Mл1aIqӠ_ˣ`KRّ.7>Qe^bדwgDۂZjJ:eR!e VpaeBI`` ,7b5I9 bڣ,FX:$SRej%K$^(~En+tƯfS!T\W(&o;iZp1w8)-Yj?FG~7G(#9~KucW^d;1KM<<<;vl炼ybe2N7%[ڮ=g-vNa'+$ )WWk[yRd1jo9w;LXsMzz iZEWpݻCC͎N\?n2W{:6x xOLpNmt-ɦRv]\+%+l}NCtxϴ\xZg:|u^-}OMj^tKzdb`c|+ LJX:^gCv JUz; !{'f0ԦԚL JǞH!B{{&\9S'Ļ+|' lcwizLs@z;rtyY;6xN54/ݼݹnXXXXY99;N;œ6oWs;:/'NZiս9upËu{-\\11ntOgssCl+_+i^,J/|?a<ʮi_0g i綧[槜aSmM=~NrEYEKK"]k `3AxRfuصRa@3Z+x8,Ֆ[\%7w觶;vӹ &T7wȾG>v=.P,)YA0B<[\qeoT)#ٲ7S`Պ/b_<+ƞ /د~&d}xCgCNC-mݻO Szno:7rci&6ٍ1M۷۹9U_юnnob<^NɊٻnjֻs1-M6~ݽܵex+.knZ9N'6)ىcbVT_0"˪l\f2~1twx}z[?cl8{؂RXSggٌV%a 2cNiv> c~x: sӓ>F=&=LҶU{f+&=grly:+Fh,1 ::wsc''{ã1]nT{ڤpᲾf<=,6Sw1ȧ)w4rtnݎLrci6ήnnn]l6s9Nsǃ-vustrl֞1X2`bYGI7dd2iٱ998w4i֘+KW|sWG7{kklOV_W'd.o6girճ92tK什Ѧ$X]>iJ$1^/^=@͒6"e$!4 ('-;= JˌΤ{_*R<"#*m4bUҍO*<.&LUw|P7,wDŠf030323 fT$$G!U*ąf0< \ZN|--Z?sʇMnSSW79cp~WpFV[G2j1\badԡ6(ѶN];}}{^{m-rrY1;dc6jUnkF) 3|im4:j{gZxAnW 1gYmYɳ(KxVna/:LD`1֟Xl1 kÙ>`Ѣ'5yA6 keNּ#yP0="q8Crf`fIHY8Zw k$nzݵ=VK Ud[wWٌyyJE2J\r[(L]wK)OM74 5} US\|(߱#31O|`zg׽K}E~ Չc&"²ʼng{yvQ]~|_}5=( v=cp$D[Gb|Э%C&f5f*d >FA./+9c(j3LѸ6/`OMsZXrתּi0vT48{GU} N7 U9^Wޓݓ ##R*S蟰oR.bnbWςz BxQ؛-7Hu>}ֻŔ]ny;|Oȉ&`%8<b$s' td#ԝcc.p%<'CU+eYe4>x[LbUE$eIꞶyL/ùyq$ty>_9ζDB䏘ޯm=z^- _5MAKq"Teu4z ( C~LJ![߷;f*ucb1.@|eUhiRq~]zr|Ww{@>>zOi6ۧB >g?H&imY+W4Y0yyYAь^;7VՑ#t)s=WyF:S^KW@|}AQDmcdE UfgntKJmӾ32b1|A/1)~{` U;DCB]]JkX%R*#oyV,c(g+3ƾ[go_|X3CɬvʼXQa}kؗȧz0RCrcN:7rq8c11ժjdӛ2cM1fMS&Lu:N99;PմɊӳelٳim7SnW'941wL9ɼݦM96;:Vn)1ssuuw7ctw;UjbZ:9::4ݧ'iC 3p8f!fg0RP32C 2$C)2=*+33+!uԔRDOȐ?R^3 WdR[+HmI7oƃ֥^#ȞőZZFUebbϚq:ՎlCn* gΒWGf˱/'Kkq<::>,cMWu.T錣0K\K8~Mެ,eGGpMb8pÆsbZY'G F웷nڕlٳhzi~hc5+:jdS1fi8n^^r|G2)@b`0Ԧ\OEu w GEM›ÊunGq025ҫtkojՉ; .>EVNĸu6et&x+1vp7V Θ ꫭ>ۯ>ii_BxC9gx9=0g93ǧ=39@L L̕,}fN؅ ++rlV{5ܭ1VYM@ݦ:1ihp箘>I PQ(Ҍgī0Nɇ8̎|*.诐B ףDOxhڹX hKcuLX.ABzMm^LVVUYb֧xi:S_lQD=C34eg?=feAXf3/}KgC؝1}XѡLQ%*;K)z31ٲfRޯ Y^G~[eiݼ|{Y2)>~=Rxn9.;onmiXXATS9~J&_~窮R|ϭ}PO~ȇNFlQ/׍k+5tgf6T-&* iĊ DsLA0 H,j"Lkub-G^'\ofxC7'6wqN-;ڛ51Z7jmjnUE]B?bueHе>Rlgw}D <)BḶ0!"L°(;$&<a~4i-!oJU\^ xŻU C3>3>݃1X1_R\w8b =qħ_e6{g_hFobL !3vx; 8 lт`8Gҭ d$RB򪾖߳xx5eXA٨lVFʿs?rϔ<D3gm> @ ITmb(0 !RbjհlTmp/ k. ѡܜ?;ۼj\K|Ks\x ٶ&L0>_o2bD=Ę0VrW!yGV!?]w9ąsl:3׳v,a,JMz9Ĺ: o)WmddbXu] od],jS7EE2E>rr{23pfO9[^Gؕ[O+^xG\ǏGA>(c5D&~ʲy\@MEA Hn'N (=vÊTıq\)afafafa>@CffCs fg2{*x[XQ|\p;_ $.э8̇R8:@;x@gl0 ff NV2b8r(Vq`Wr*%bX2QD6j\s[-7_N7|)슌0$0Y(BBhl:bǝF(Eb' cb]N*I@Z .ߥ`Gy{^eiv$1Q2>X'ZəUz0.Aqw8ڢg޺CG\%^u)\פ鞛Ngs̥U.^KgyʫH@wRj/0mj!nH_L3+|4jaؠ`Gchnz)jF7Eͻ]˄j4/JG}z+2V2To>mJUFqC0iI/AS>wve$ no kcGx=a9gLNwb`1>11qnvXT uh5؆e Bش\ &&=MYÂxةOd8-ioK[&g9exhYdkqVN2u5JYT8rUA% R Z#Dt\q\[Yhbiu4˚+yɤa\k̞ nwVCëVkf) D B,2|2鋷vnm]J.+BcHZlس,@[cFi5oT EP4@PY,Im]K n0 @G29.IG%p4WBjإ/KŁ^6`o"E.?+z~aYzIx>+_apWe,ªZ)́¡H0YjY8mb-t65~{Ū(cB DE1$p|@bkaGV؃-ߞFӳcf mj}~/k-4KVGl\ۭXI:LMVI~'z~A EQn)/e}N?SCNwLU2]p;V.˳F{{ F|_su6˼mmtrwo >JVN%k٘ڒ9G&]H3$ fFU^mOoy.7gҞ|9b( -0EDMzLBexZ0$ HU(-S(2EQEUIMSCLDT@^*+ũr۞g5"q{k\ᐳ*)ް)dIxoolb˧މGMO,U.|3й]ei%X0hH`|@BOfE1YS yPQ{_*=ɏr23O#`NY;'{N/>An'{3zwaK%1_G.sK! T. d$>! 
I%33'x Z-Uk=#}jT.=a}]||T(mmRW^ݏ O ܻJeW?DeXl$!\fam1:X_~<㒽ؖ -,Tl|˵vj]Ɍc)t˫Vب γ%fh`i4b֍\6Ժٗŏx~&BBg0fLLc٥lW|%>L'qOp.vx:dҧ:>qIg=Hd"hН]Z{gy``}sL:Q|ѨcY0f~ݦ~yrStMe3&bqQ¬/KinrZU;>M8L]^]joa^x(j]jZ?\F\,,\-G #i4_vڨ}kwD*_;o[p- ُ!ۏ~ aQq\`W?%_:hk1 +05jgD\*"C)h"x%'w{z_,YdQ\J<{^<3*%@$'TQTQ"{Jw|{ sW ҖHp._!fqzoU*pJG0*)=)4̂.TW$ |~ykK_>X1FYFY(Hܗϓ3^ёygvv.7*rj*%pX}kZ;WwA!e7/ҲIܽ~f 26ʡz_Vב+qTT&XUoj>tXE:ٱh~hڥ O*lU/_qյIFEiї&-mVI1EʔStDU^^Y]+,(*T9vz_UI١xax=u{' ]Fl١x_3( suN6-8+Cxi;h=8{Ʒǣ®U00c1)VUBaf"V*ef &"*i eg.:hv%V,J}'IAzl721QӒWVF@be2$I@yXH6-bW3 HCEԆԖ]j 뚓A?E;H\y.)izt/Ӝʼ'Go0,R/k7Өz/Ee3L&2zyPci4'%">pVH,͢*+:*pǗR{ =u,U2pC{霮} ?Jljk pjo]uۏ7Yuf1*DZr\:۲OEtma(-?ffff3Q9#ʪqv3k>Z,U-H:cm5+t>'si5R1XQڜ^?q3!2߁IHe‰\R6i"EA{&rVĤxm1X^KȾ:*;&E_ۧW7Y֚ljizdh_1+,),GR B\2xKBO<8Rh8JtMrSGCW'Tɓ*VK&0dNdRd&Yb-틡z]xdmxz7ZC⿊q\n\.v7Ys;F;t^;,ja'CfRI3hvq2! ć_z: ܥ㺼vbFX^vW\,/ư%y.zNQOPh;O5/hyv#"x х!ES/6y CRW͌%{Xz}1oF[OO0a^>%4t'{*8sY&Np$,>% hM^k`;8+H^Wֶ$2b;[~M^GHbaY<Ǟ.!vgk˻cdJ6 XSPx0֗E[nJ!J( 2+!dUjPD dLT*H9+O)rx^+M=mcLVɌcy}j].:~D?(&Xf^uUVAnOBIb:~ph?m^o[ Qu݂N~\Gi{HGFn ָV[NdʹjŌU?KP&dAx?AH)B(zӈ$lIGZf9rd9dC&Y罨-uJ^yOR){TvUX)smmWerQt2][eP3I;mhҬY..stb:WŌ\r̄W(J>| :IU++s6Dר]6ѝyC/,V$c1Z8X alLKjrj)2bG ۈ{$L )0 30@D2M锘dJE hy]:5"敻Fw5_ăұfjOzKAX *3 yV{E%n Kۂrž7KzY> ~}~K_^ c׎ZZ2U/B5a4U=8d\"c,I˥8d=<$\@9grjC|jmrm6w%xv-1+Ueu{m0{\v'C ܔ.Ne{žHeF4a^_+˹y"'q2ihh4ZZ-- KCHÀpxɴ Og}w^:{K}OSTA¸˘({iqO}z)1{u]?fRQ:p^>+W:>hޓsQ1M6l|/i] ]L6ƚ6#89pۛՄ2 @r`d! _Sv&n(EL-$8F,Xe7=Młp5jN6֭@fC+ 8epi.14);g䤰2 0*aeas ( RYIR|VX+Rb-17VS4ݴ5Tl ٓfnqT$*j{ >9%%hK2)~Cuغ.Owu]|{nKD̈/t`O%(, 76skoJ빗iwʫmW\EbJS d.\NyE)ĈQvnV5[+%(馱KU5mS@ 'N_+}D5Yb>1rɝҼK60d²ePڅX6kIɊ;ZTWawWz"|7XXʖqD'Zw1+b|7ee,d1&1^u6Z[ہe(B&ח8aI Heî|6;-dbֵ/J59I'%ߗ)ֱ#G8M2 9R&8krHќCoB먓\ێ\+`1Ks xqC&H5l}AUY,;Ct+tvLIEnPM4(P;nsӖkUZ) i:x4޸31Us3'\xMZacv̞ ѵ2XƧ8bf[ݍsqq#v̶fNLYk5n$ 髨XMJ(!sle\1y9EFԶpJh~l*LMkT5]hفl㹾['wUUtrˠvՊ`wHӸ bbMQ`IVvlQIA 6Tڔ#(K9 BA5C D% ,o:xٟq5C%ѕF ;Ez4E(ŗbkV3<9fzxz`zpᏍժ*xn'jW9[1JOٳ 30ٙN{L2C6i1Y12c=Hmhe Yލ>v@tc>Jou&xU]wy±|ݧL u3S'efOTz ٷҁ d#<$\\ᣏǥ)e, Kgb{ʖ1KٱV,,(HX<g'330gɟ&Bd!3 LJ@tVNbى`Y7̾o7StRcIJ1ȕSX^ ,`Z*ˋ[tUsܽ"Vew=$КLM,333!3)9Xfdũ5)]4xJJH12ѪbAMbCX)jlc`|隊X!XY̦e:} F&2b , T1XÀM([VԻj C†N lȓ33 ?>xȓ!2 32Ȭ\% heɩ W)GkJu!1Vfg5ɺܜW͸;6zk;9l]w(^۵!o$Oi)b\;VB ݦ2m1ٍ&;7r\d/@\ ?> WRtnBܕT^zy')Ǟq%Zc|Spw۬5/,0֩=\kk3, ah28nsfp4ekfHi ǀj(OI Uf6P9kZi³,E$!7צk )gI̠0IR&mB轪Y6:zj3q,gG;\]dqSMJʘN-b4VE١h[dƪqmNpvZ.Errj[6Kum8j撬djNSUrKNʷg6y͔<.LS\I4๜ۇ2SNKV}+- w)FXtYS+vN9Ãh W&pCYe+dl"xWe;,CS,1AXIMiSIћY[$8ބ#8HF+2I1G=;E%Xbζ-&); (6ű˟+uW2  .lB&zankr|v: az2[a 6jMز2;ԫމ^iK' W)fHӑO/¡x);qB(2TRUz?0b~*W{],i{o"TlC.kO&F<'E•zx. .*6Q1 660uM;C g<8HBx.x(o&TZ=e6eV#f!$u1 Ûvc effFiKGqKT 5cC ݹY&f;*>?~]~./Qߩڈh2$l)yg0g5)JE1fCԪvZʇ[\_?KJ‡/ȿy~]tɓT/{Зzi1mb42b1PĺZYC쿉ۅ|&?^} 33333!̆`C3ffgУ)[dc&0l_Vy~uji?iK!Vݍ8w2~Ja{3?[TTsne}e;UƱ$2ZUuzOĒ] og4v;iyyVLtA!~HcUH[xEw^ϑ[ߠ|+Sͦ2K%wwyQv*!Y kMUmkL#6dmF+VJYc UBaPza=Ih|fxkӹ/l,&8t\JU4 VZ)ɗ Y?6d]>ďvAGcPp_Iu\W{y<4 iҍ*l8ib9~~q0=4t0 z;/8 HbuE{NfU.~oS 5. 
̶aCdV:>=Fow_xO\~A;F9_qH:y ܡ\C=I5}چMlK[[m)`dؓja Y[0#LH̅c1 l*uMS2]`C6Ѣ7@F0bh/0v"Š#ԋW.x mjM jXqN#p1MՖUb1T p r1}a}l8NCԨ0裮 BFwn<x0ȅ.dW@@,=63 7l*Rb'nshz l @MH^@ 9P{&oqmVX8X6"u"9`-`'P-l r-a56M  cZKFq"0L+0Y 5D6 @tJtJc9$o1.+il`eYL}m V Ц6Cah &Ж[p6hhfm6ЭiMn7i- iÁ΀q n(<(Fr?Lپ5%l225dDj*@ %LsB8nÁd7߆퍵IXJG0pX1!1 а31+CI4h (ĩ(A DA@L"=S, G(i$ń6qXV[ơpe%0X\SL6PՈ$`bILZVJ+Db1LHJH$@LC H1$H-0`VXXa 0``Ȩ w!T«"@fU9i)aeQRImERma"3*!E @^B@-"3)Q҈Uҩik aJW7϶0~L`r' G cd]w4uhh qGD 3U1B;>QZP`Q,;XY9Р2ŵֲcMmS[ޚkF֌ᥴg=hp5וr5ɳ"kx9WWҫQy~|JI|~Zfgܸg];;v;hzJYi>w;dI](|V}<ռnuSF6~L~ 'O EeC~!S-l'-=3QKE #Nt"X pte qRN!R0ͣ9cdPzuwKφ_Eyε@֝ު(Ŏy> 踍ꉚĢopٍ(}F4B9cT.ﮥ)JRDc/r`xl\C|*~8l6Cn 2b3lǞ* `,ORE -pPOL9t]2V<eX6bEAqCG9׈Vlf9AbO)?Ŧ_(DDDDDDDDx?;8>  {&8ae8Q$oM&TJ `#8a)U*çw`B(z*p?8c$v"a11-Gnƛ2fQJҝp3^"|0O+NB[۝E$tzeZ#l0٭dqT0+zAlz AS!c,lwkoF+)kޱe[|rpr ~bt3ZZu-BTX8*#`;cqq#Gq4݃MCbqn+q@--4B,1*{`(- C0s3b8C2l(.vذt̊PzT+ϸ>7?kŦjpmC”!N\E2dtNwJ!ȒI*SdQ$)E:T9tv*tDz:wͷ#~?c+Wy Ι1ήv<:뉋Ewf!vPoy?R}FdבZRm:wk aϚN,hQfNBK)([MtU)܎ToRQlx@U"L ԡ}aa]bv66m3`˄J^M|7T}nP܉G, Y3Iij)%7 J{OzrLE!ӄTq6Pdދ-vA##q4~:I3*RvNb^|X=W݇<4M^Β;UcvS?c5Ҷi K6t 6ǭeΦY2_wKk|wDJeD̓T˸zz˗oobSSOVEung<=UKxH'LtI8I-c>+i8r[ Jէtv>?'or/KgfG8ѥ]cGWR<~7U㢭uѥ\8ү3idGW$]#Q2>d}|ͧGg|zY[ǿUQ\{61[CM%R-7;ʇUcc$1/#GG?E{jҿJҿ5__uv-+N}Fͯbl-e`.OsQ[_Oh]ZY\WW=#\W8t }wow~.\kvAiѩ.[QFHpJU0/jo鸟ɞ&i'dQ'շdA*Hx4#.;2C>V}p^GvIN,Hyy$߸]A:k(?0YR6RI3fhSrjV(P^)b_Ac"ab )Ĵ$K3uy-Mv#g_t`?d!h?ڑILJ?HZ}xgp'hq mQ3G;'upt˖;1ъRiRn?o\m׵lT)O^r؎C9^S'xq|_?)}{Sc׺)Qi-4)ztT>ۈ,+Qmi0ڥ>2賅n]8 +Vw1v/|hV,>PmٷiF#fQS}uoT0-?}a2U xPa?xJqw#R^&:,{\6'9ڻJ~.}!q ]OCh=yϰ}_:J^3a` ؛5tv[lC`hF^Ce*(UE 4g1ݻUUUUX"X@1 -$Zֈ E}(uWm;rm8 !+ߟϞ(樅o8I,y2^I+}?{$**--${P#2ߛ'D?ʰ+D~>}֔e?"D((DDđ11100l8<"b"֦VlnsIiUn7OIRx}ݑD>E'RYr(P+}V[eTܱ׳<3c}Wwח4v}{sa*]V4^ MEVڣrjP }D@( ER>AC,P}={v+{wu|/an"w@P4I{Xo7w^wu74Ǿx;ٻ8 ڑ:J]7o|}{%VwOx.h4[q=v^ۡʻj9l_v!\KRw+Kgv}:Wλ}ewF@@  =A{< ɳEmؾÓ n=0h#܇k\ǻ>=kz#嶡`ev ]IO}pS0 sID;e(9 *MdCvgP >(kwexުm u}( $-1*֢,xusϛIBυ{ΪYaLN`v{q*Zl*k0nMX Ai ]];̂Giʾ}ϳ[;uje >@u{[͇ ӱYZvn[QKlRCT>}i & 5O M4S0.5.G]͎fBp6hsİ9 `y`^FX&|u/6'A~OUU@45<)(((V0$ +R{;3\`UjF$` BD,A Gp8 JJ>IT⪚ O B,}) c|L0?0bƌ!A&O0|m{a _5d"h %QCBwY.B^ڤ?~帢rP9 BxtP``@iQO~UB .RPC% ByGp$v/}$ = `*ysve1r\DfG(gď[Q ljp5V2XQ*IwsظhM48sg55C4DIp/Fh<[`#ŤLb^=dQ@POX4O"h c{)*PLo?ɈXӉ_UP1`=FG\d1ԇ"m <iJ 8h[.o?4ᙤ5$N [:W4D}#ծt2ՠ` \Ry!U˽\:KH^?"Hڑey"?BXC*I(IDQ M )B4=HH($Uz P @(C, , :C]W󈥠"A|r(^P(H!'OPB'$tJȡ P No6'k<,LF$|>S>}xcW pSk7s*O\Un&[ivMjsA}QlߑdZ^:4F{҄ACO'l&xM ʑF0pz VEևF7#ٜF4|zM?8B,e+G\{5Z4 0/98FiG&IT :jOɋ Dν7tlwo͋XL$9 :1,ͪeJ:!;>WCm +MUS/VuvmКA0:5w 7(} .pd}9}z)"YkN^;[0'~[X@=N~ U+QwNHOS=1fSNgi",Y!:^l.R۲(ף9(puǜs1%:>:NG{j`FZ# X0̂'.ZCPD4cݰ<:_70Są4/0H+߾ßy8"M:?H1. Mm:h M?| 0^IoBD/ i[^ JG''} b)qsFtoӄV;+3!=rR.B1W7'+5g#Q9!QBꤢ_8J<7sW~t`a N#*;!#(hWSZ XZUv^A]͞vJaxCʡ 8zǾ?M ,+{Ts>9|%&-%ǗdúFSNP@g9 =3O~%25z(-(`=]ƛ?9L8)?vž^sO ]1UjdYB$ϻN~>{ӷz2J{& 2ڄQH#֚^ 空Nq {n7xi%F:]8$O~:8ӝM Cq_ RI~MtI 2'IZӟ_ZBhgiAyiäCqg0%OZ˫\F';5$b4Z\I֟s7OZ>$p#!ysI?j|>tzC%C.qwP҈N)%d}}mV:HZWZ̭}(:`~)?K?r.t/tO!2h>0n3chm&_Np J8:8-W'}!rf yz*^Խ 9v1OeI+ Wpljz j Qz{gz=N0ӐQ+ TJPyovz7ߧzf|F.ot996| [>SyWtYkf LJ(#ES7ƈxB0w#ݢ'amC~8dXƕ Cv7YF#z +nkZ2Hz:yM#Gǩ+/"!#F[#]9)YzLcj6ƈW5!8!|ew EtZyz)cHsߓߜ)tn?>6272!!'BLr*Wo()P82}}?S:VM>< ܽe5,Q:9!%4C2bA$\F3Chu=QՆBaݸt5DO61L)U9N*@ >{kG<,,<8΄iam C$e%#q !3,ck&uTl|jh72m4T.5+1cHƉ=/.N|:{ȿ;c&U8$NB L>!g_!98wnc x=bQ4STc0rRф gZem'1n0Ï4EŅveqUR>Qu3(\æBnzP"˪9[^ J{RDwfN;EC#۩lu^%cd*M.gFL΃.^hM?_+=0q5 fߣ22d!\oc0( 4Ȗ'F_fۗD$-J^Yyp^aƾ\8=%!,<"!s[vha$3|~YsIUXanFC z*P̯zh1N(0ߺecw&d R?t}=~SܭYP11D2/гXwW;@Eau Bj_nyc'CIAS*~IC3 2h%(X/)5okb16|lpϡE#epMv)rHRr_s _39! 
dZ>썳8>GfDnfޑ ?GבּWeQ2OoX(<8iBw~~4JG\XK*[ld?M$2\7fv }F$84erŋr!ǵY(Ǘ>08|MoǮ_%YÆAK;~/ѥ^˗n'P!35ic xjmfZcK D(6Z.# O-AVh@߇c9͢gmCG(0aY?Fr8|,T"ײʹ&ThTNcjTKy㫑${8š=bDIwaLƛ$ 4XlT"[}Zogj'&O:|>Αy:;eHDouǠQnŸ.&w6_;Myq8Oȼו?QOIJImI($MVVΩctN!e g.ע'a;m ӍY?8]d$&t*2Cgur}VNRtÆE 2[aFYiٺ}rMड़&?ɏu X3, eqgOq}_ozƈwNugʷ$aI3)4*z;wPmbD|^Y!  xZFF >hؔRe@4rM6j3X9(׍E#uY D3uSȋ~S]rQ&IssX(~K\4w ?0=#&WW2v҆<^Sa%Bß.M1VM9㩈mEzI @l}HIӹ94EL" xێQ|ӿ 꾨O33;mƔ4Lo^(#!D44Gp{7V).Ybc[a -oӻ5kRCj)7dwOi[ he3fWs!CA'pJ9=z/t{ L},Fl,2WpSXrsD@~c)$'?$Rei~_ߟ_|8J!{"j5T?.Mi!utN WW“eY37}(ʽ:x%#_.1CX3ppP!7ho6Ͱ=c4$\")k" kP J^N;ލГT}U tGITK)y{y+Ƥʉ_K}zla 9~/nݓxLjuL!ƹ/psns>nF\:`Lavs=ƓjoP ;$/.?cgO:mY/m2W6Kgomj̄ѲD`D5x(ސ.Ft0M9dw6 Li 30ƶiZx{.X|?5 VMBWbꉖz!{frAumix^~lc7pFRM(M)dcr3ʭV.(W]ueӣ pmim2haÍ8M>$<~9(?PKTBqX *Dێ='RBb!+:8 ZBH ><!%P(I (kN Ī4*(ObJ(? PZ@BAEh bTe@N2(0J@B PJ)aTGH"H J L 2R4(0)30 5 4QҨ HXHE@C(@q8= RD_@&  (!0 RĤw+CdHFQ eX @9s33333333>8dSW g9fAa_fW033T}A aeaG9 遘VaaL0!𞁞}sL=| !V2b0ITBG>(f@S+*g.a ̯Xeg@$nj@s"tx8=9$I9 "\2P@ 9~B2z}.l0kVTĽ|ҋ>kgQ]3nk%4Q}pj=: cK3&V0NN}FQGכ Uѣfc&]F+}j-dpvPUQk쿞i9;vgVtlVVq#Uaщ )6yȓX Z[[`m |ō/ !Cgf/ XʎI}EY)ֶ]`i*{t|c(K?%wػ|gdcH~$g]-#x΍w`zןE{xxmNer)OFF)^hc9, "![wBA™aL7HBs6ް`gF#yp/J0&~Sl-0qo~tM};%~vo0PpԪԒ攚*eҪ'mI!'ɇ bt{Snow8A8<ZuL[7þӶ5c>{lmrO*rjkL0S7Eؓ)[~03[{A8lPqjQf_lXIv JbL[Ws}&m6lOy]LX/WOMn7;7])`B6dI,Q կ[xmB芀"( OI$KX?Lk! ۆ>ǎ^N=JPPe4DAVzQ䨘EPo?+ GLD{~[}n4R2ikc'ƜR#iͷNZ$;oDݶ۔LުrcI7p|7kh #%er˨%9Ǯ2Z?O <@/D@0``w6ӻƏ?=L5 3PZwceM6rzF& LgДutw]q$ \ܚ4#`,ϴz܃E>oo\+;%Qnhu2̺3ѾO^w/3n-{&0柟v޵`Df϶Tؑ{zDWؾ電>8yX{^Rzbl^}zao>N%߮tc_g^Xן?77Xx`ZwN3xy®{bS~=|9=]YGA/7ɑ8OWm͗ݖfѣuoӣmӲ3Y)U5i\bov Ț: ]zl|q~k2ҤEChMݏMg׊ޘHVuWNF 2/V,Ӫǽkת{eƞ[|z ]x;^xb! vN5rQ7%/טgelYT<36t|i>:xXz8Jђ ]];LuuL<=AZYk~c}6xkz5q?'HNw ū˓~ȌqߣV~Ƿv!Ϸp49/^T{=@[Z"oYG|rpOrcOyM.w=[xfwҮ̾hheo~3Ɋe z0wc{r;=/|f՗^s34m 2'ɴ6'|Jiv|ǣT^sdCfHӾ9sU}>yWƼY>i]L\ oXvZtϣ^Ǎn糲<.ܯ6VsV Ҝ.4Z]9ǫ!<;z_Nc|\\>_^ó??.n4ttjL>ܼ>pϟѓLJP~Yn=xe(d>nse|Zl~_H4[6| b9iGW@&"Aն e>=ijŖ}gݺN_ ^y>zc5,vcUTgf-ݿ>?9Uu?F '~)i՜R{O}Uzɱ ekN[uP=y_ϵ1L*_fhsu3>-&ώ쟯u~~_]^˗>qP>8 ;3#qY8׏:؜7(:9^}Cῗf^:3r?l[e+wN8IׅϮ]{ѯ㏬˺NΎa wD=ߟ] VMеݿN8Gd sb0;]4zKĶ4o7p3:Sc|Uٮ)n:\~>r k@܋ϗ>ma`#bF2|g {~^3?jͲ?'с,\2xbg$<:8>fg_z{ W`=.vY%,?/`υ^VUejsɏ0#o'+gkISs4!l>+(;P9}_(ީ#]tH;T$ &OWr! $\ݾ\WjɖRmvu砵QC>K8=ɨ"2RCݿ_gS]),,֥~l" -'~~8gWOGF+3~0ԈE;ҳm4v{?N p􈔹ZNTvcTy2~VW߸UiP\dD;ݨX\N>ݧwݎd`~Ϳ <<737ơ˷p뎪%.~>][n1mKvs{"~E;0Q<VBB h~AgBAL~4^vUM @$M0 Υ FcÕυi?_8}A{!y}P~߫v!](z|\}!Ag*!"ƭC{iI\Z *&kQes s^qڠoMw*w.K,?8! \ a+&̥ѫ]+_m޿pV_i|<j%AI#J"T@#B""T+I$LL M)ĬBA-i`-']&%^@b9Jpd) h((tEP&y*yc$t*hPHAd@)ܨ'}mΗ{=>~^fy{.!pM5c/b=Fw0ţQqqk z dwv yV?AdEw6<-tۼIu{[Ӎ H?uHT)T^ߊi,wƜ+'嵁L@Ga`P4{ͿIv'4kN `'҈i# 0@`,|EwS|YlfoOm7ˉB`n":Og 8~Wo6|Myu5_lnEOۻ0p_!5?;yz%D{s>xDi߯V}P=ݿN(zo`` ho { )Hi}Yڛt6:~B,LJCuvf. Zo<3&toP4uͯ{l:eZ݃C 6`Nί.>-ho#ۓV<Ѹaɾ =$@s{@!qpƁ$)$J BE3яP&/ 4Y8n㭾<|2aA @4A9/cϗqt"35Cǜ(u bD!Voa:>AH&h2 {qֹ`B-7l ,8b۷V!/iL"G l copcI&F{'#@HS{~B;r6],0V/-.\#h՞xnLid9glxlATg3\_m+oFJ᯸I&1i)>t>< .l-ߔaGg\(јZάj:G~./SC- "B KkE[ܜ]mcH4zt?$ "hIh%Ck^d XK1x/} >0ÚRMd:(4 /ZȥDRrB ˗xRLpQ,G>Y˧.~  G握v`vfS {1B#hxɃ^B7GaZۭh7y9;ݸA+H=>;ú][j.,Mz6Rb@z ɻ/pO6vB .0?D@]0a/<1: [tNp^\Cu]0e?W Uή[ٚ-њ\}O|O 0OLBep. 
JJlٮr%X>M I+ y:8w2Xf&aO0tϾY{멢 QE[u1}hq\M)C9 1aoz]a iX JdcA8TN>WH:-j%BrZS̀٬(xrj r:1,$dZv:ȣ9m,ǧV2c&jdQ0rF:XS1qab6֘,G(\Ɩx0"S2*PnPr~/e)ҙR@RؗhHFx?"!:9K$HГdcI$D9kEL̆%Ar0JOt9+r_ϻ\O It<`P8f,MdRd _:E R @Ӻ"Oj~"G)Md* Z jo` 0tHXMMxQ5oLD!Q,A\BMaB Pd22K!@`C270a"^c"bI(1ĤQ;<*%TH+Qtn>% % zCy+u;bRv44AXwKzU~L!X\M' 5?#n4>8N"/4A_/Ԣ B@v(Z6GQQQ} p"P2Iˬ!>۷  5)0ARąQ@1 $*`#ѼM«+DpUF g.cXJt|C$tF5C-#Unm/E/ag}}I?0|ě>?an@;J`!?ТI4xpo`N-{#sɰ&L8Nq*MrA7h*&)/9Uy@4/J[@ C   OyWšCZg6Bz;hթ-]0sЪ))#v3u|H4 ,'M~Kr@!~BJUqcj|͟3'i?xe=mTlϹD?b:?r~'/w cGt Ƅ g]1tnO>8hTY֫@|lQ%I#?jkCJ@,A̸\&)S"ᇛY?uA2^mt &V_ۜg,?t^Ifpo%߮}ZREP5R;Y$_7;U+5Z.ЭAJXTƪ5\SOᨎ?N  _H6('UAB(5hqГwF$7Y۪u5B!!!@)Ks|Q/ҕ`' Ԑ`d@}_p0 %_?an/ H~ do'!uO!pg}:?o?vt\>rDLkHgFP*ҥ=pewU 0y*4I6PnaFb)Fw_ځ?vv?a>:t>{@;jRJa"lmFr?/nьc^ϩWhȆxb \ F H,=I/qΗ"J(mJ-7sڶՄ+1L~k7"t$MtY6KTp["Z 0ʂPҠ6Xf`zվ?^4%Zb3)={z4JfZ֝xxg0?p=:x>I :~7~NvCDXRy3 `~_IŅ'ja,]єLPV#az+Ɏc9@-# !N,T' f{!@FCk;2\~TLIBS#i9iz1xdH'W!f\Q2˧Y/e 55{N1 }# w$A563}Ɖ Y*jJy xvaM N  jFdK\#k9͑_Ҭ6va&w>lʐrvcPuR֋QX( "S,(7VY2J1+. a`zs̲e? z;&@KU@8l` t u\A< ӑRǔ;gL)oX`󢄚@D"0gx'0!d,2rUl@n&" $2֢*Pӗ0d9Z!t7 g]c  @C/Uڰ+.qOF(,`J12 Fݮ&!,tpG7W!㭇LY z~tX2D#[ZXYi."6JD¡0# d< 30L+Co@hd|UJƎ)^QQLbst@^ʄ*iJh+s!bEf'2aR2< X "V(*iZ٣"wW?pIhGe# LW׮ CTTIs,0TTDQ25fJ| Z\:C0%)TNRNTQ w;r|lF4ñLF `- @Q !$DAQT$AI@D4PLAUK̮dTfFBmAlJ-h,Lt$#sV)' &ie-ZcNC2DU#a,A*ՒŤL)$Z`(lș,"h0H"(9M13(( HIMz4*)U+YoUa[ZQ5E&FZ2KPɖhEq fSYgH t!X{=)wD6AlB$HǺyT4ffhhhЃYl2Z%M1FҰ"K(]ڙэq22-݅ +*)tY24!P`$MԢ (QH-Iiٛη#.*J$i YJ!a]ZcDRXX#QX5ETmi]iT QuvإF!k4*mn.雳duu8s3YƜɏ$RȌ46mA'fDxᄭI^,83]5{n I9x5aeNKW:4Q14f7Zpӣ4J` C{+鉺8Fݦ7YJq!јPfın:ٲ֤bBD@ `Rx" e) [b"RhJ=KHpy=feHLjR(S62#h*`*^;MEQ.PXxxDqp+"OL%DNBw忠MA!x(EpCצ:Cq S~H$l='Q"5J:)4=*FZ`оaDEC1HHx1"*]4TGQϖpPY&A/"8 T24vu+͎ `mܞ9EStcoj< 3NՃK>q$Q\+ ЗF\M(NL+&p1ƙ%K(i~KoO^[iPuch1ب]d()`dmڀ^," pfN- m.B%ZS̆: 9bIV%*ޣ!pzݩtIလmqa=kC;oȫ475eGky;0zʧ R"?CuH &ڎw1l\^b0i ۩X$QelM8r:ĊjRՌ#5DnTiM,"d(e6 јc!r9 i%&A6!02N-dM5.ny$DifF0UE%F RZ%4KuF?B+_ofC.D=u;uZm| g ~CB b_t:C4t&nڅaILd Nlhd3E/8_CS?~y.qGVsD#P D>#JhƣaUTjyPZ}ɘSMPX7ҡu /;Сw&ET{uفp6dchu4Q G@gC#ĭ r)2 @蔥hA(Pf8W\` A0;^|`J.T|u72:,E,iTE)^_OAH $!zAD=E@ЫB< :{1s?sɶ^ |Y[hԡlC#FS W5R@ʤ1r4꽂)Uo6M&8"%-hE*WC?e93h䄢@TH|LX/BC`{N)Lޙt paN(iARvԑ_ڧhv@ߧN0i)Hŏ9O[SgQ̂{^zrm8a*` @>Ϙ09}4_b,zN~YbNAD<)WZi&SXfgg}]2/)hSv0G;Ҩ1[eb[(TɊ1 6X "PbZHy(Q?N7JjEfQb_c;8tALI@HaCbA(3"v1< J~0~’b `A@q?݊'[@u':E";؉U(H'b=nJ/AHp^cO|H&'J&h* (*!BZR w?ξCY1gMnōHf&:MDRaW 5 Q*4%,TpE'ͼ=i+Q["m9)Or]:,gvt~.n[֫aM%D/@MA f.A T &N)&JH I1TABU)K@PHЖb9"B"P@!( ,BPMTK! d4R"dWT= ~V A{:< 9Hmd ȫmPC⍄)zAdyĞ숴bb)Zf!bF)f I .@`#W2 "VJ)H !b $6ſ>-rCP=y&DH9 22G$2)A  R"&&&%p2Y00diI b!@"rEJ)  "Bj@(Jp%hirdxb IN޻ 'Z$#2 Ą6Y_Ce9h΅U9 S!HSqSsРJ E}'gA!KBIQJDQ) _K=D?ޛ 'A|SH_)ɁTl E "R#$ ~` %y^CBtN?T"j}B-j|$yv͆[9V&@] S;KCD4)!:eFUXekYQ  Z-PDATN6X4)HcC@P$$",a6ND9%AQIQ5#A%UAH B5U1>*ĬA ĶJ# AAKJP## ZD"M%h2) WpB&ZD)i W$rH D&3`d&h"&e1DSeX wM]<{ʲ;38]` $*Ei%=p0A `q3@ N?9KHz^lO}]*pU*9>ϛUhXJߎB:`KL' -uŢ9Al5([ 7 =@!`!hE}|QM_i+yCPhK{AJ0,ui D8Q&[`DP@%-!)iM0b*thn˗XLM3M J$Tx#l0 9EZiJ0f!޴5W]sfd+T5Wk`&Zϡ5p5ݩ#R[IcVEtRGQd.wwoZpsB-0([W39{qq6Ε92xL|ʚ-S-;å̖ufb+x&oMtIpzc*O|]tbe2뽧)U(&,ߖ˩RMٷὺIi%cO0yW2[,ozﻏynS[iU ޙ6jY ۰R<|Vnվs59!M-KMW䙰 f sI:s،b30:-kӶ11ln99*lܹIM6BRӈLbTR>B;CPHo2uVY;^)u%]3ֳ˨1RIHtOJ J\_D1Pd%fir;~w3\v*$/gnYqBŽ/#ـ8ڿY[ Mr)8芝fEDBL#DVؘӣe͑Exnz^0<(8n50מrh^IrK7K .tv7xsiUä]M[5tik3b0soKZ.݈}MSpFusw-I(5,mޞ7)xգfA)ChEY݌17qUbM8v˦fcd=w홚d㽶" xyD At9mKʇ׈tq40t8q_3vp:h҉:W/w}8:by.RhD2md՛v>|>aTn_vy}2G*]fFtI8bG`,e8C\Ņ'w6VymӵV cFw8&p izۋK+!g(&*hlML?h4j[)E2'o l5[3f2Sub-Ohi Dmt }X7h*|]#}BQ5MYYpXPT[ڄGثkd}&ѫ2wZXYO3 L3yi#g5[Ŗp3sY}mfͼyly&M>ļnMI #,l~gFT-5 meD0C)d1wIKt+yUzәǓ۸P.cC+w9ShurjQeM3{d#ʚzCV(pfYUc:p9nFۍ:FU;ȅJ{.5ziK>=D %LOA0Æ{EaZ/c%RZt`! 
zx kCM JhOUS;iI#'I %2 b9 UOi]!L# 8[+ó)z< )Ҝ*hnGl8q ^;X[/QALWvBo٭q%oYy>U"<`dq\ Iytc(C%ya&41i" d0f"vC"ZRιLˇ+x6gIrf<.*.O>Zw=oJu2T+5{m4C~Ĝ@ ny3559FIaw#$, ďz~#~$W4;5ӈkVTpw4Xr5`h@M'~%+U¥f;F!=Z%'WI Fn/fŤעDfmvΎΩ9%lNڕ̶4ӵ3 P:R^xڋLpb.Z=E1m7"2Ӎtu@Q1:qIq]K% w\tg_f[Ν<*[NFY sůj$vͫA;;2{_|m(tJsثZPL ;o핇J[Smr!8iCpFFl13R½ֱSݯn,`i!Y)0֍T^搧?KESK1$.lb:At1d`sDsjZASl)d IյlV8jh8ƌ햊M= r[4zI݇!bRDLpEg! pZN`/q&3^1dm`hGy_>mlKz!XI543wkpBX;Vnf8q6^TsXtN .)>'~q|k0P"aDŖ"T bÖkȰQzy:bؽo>S4jEKNp,뙫ý"q lV0ᘲz6 JE"Tв_X\b\(P3ևG"dC.;dbvwR,Z2wX7}Biw>·~1rO*0H쇙"bHg*ّL90W˱g.|TUL>Vvej $L8ĺT.p>d ? FխhW!UD);3Y lYsF "5^IIW* U*|/z9jFⷵ6OKU;w&^=]N24_Ŝ}"uNa)TʶLP '%H47b;sNRU% ,&K&)<Ә^)٥Faif4fGJ4 9űksSEj*hFȍVo%Y7G r)K.~ KNPk:#WQ)$ͺ;^wj\wj}5$jo+2wLn#V.*_ cmLA/*y3 mv7_Ŭbrĥ5}]6CcMX.[t&sp,=sE&D&[o{쎫 J1{y2}n7(0<M!٘ ˀ:LfÁðáa!D%Ѽ\^,,_-Çur-[U< pMv0aNq¸yb`anrH7mymn_v;o <]e_ӽUgzAY,{Pm\vLMoY=^8u&RLCÚ_QSHtMozV79fhs&[o jx{q=C.3Rڥ^r;LP"L$̐bg/j/ܨ^g.B#YW;d% Hf ! (z0&)}()چS25 EA-4LA0N3v܍cQQf kfV4^01`bځ`{~+ 2dha~مT\ rb&flg~跣wB%3[B;kWkK0Uќ#R?¤-&"Г=2RCZJO;q x}82H $A!1+JՊLcf&TCZ&$Bw#vvR%wqT;[4fB/*2Bi:<>GƔL9`fHt+U QuHڤ~g:NgnRpw>HDQAb+貁n,%Cٱ!@+'z=@),8 ֜ }Ht0DOk1"cVR{<-m)%=ZPKQZpI6690-s:;AC@ly 4;eq6VyeIy^y/57Dr_1@l {.թtS*MAUHX^Li`XKPa8~(@vz~{;NaМGoΩm=mJAi5 I$Ֆ[ˋݏX-oPw5iK Ccf4Jf DZ ,2P YMcE(ӫZ3"Ɯ! WSM`8ְ0QS۵r,l#M+& $4HDxs4f"i fLqw2qV3QѼ!5 aJ1{=\u2oIn.݌Rn /k)Fyc/pg(ߒeP ,U?NL'6T ^# rx@wk1(6;=]ڜZݒܰ$TjfKṠ&u8TSBU7zfʌh>"|2q2vcNԚ\hѱ Tc6cOJx3>U<3L_ʽ4j i ` X8ϗ0k6.Xid6n[ ` 1M3;iՇh{%{=۱r}_zFg3>]8-K!W`q. Uyߏ;'S[AF^cd,O"<I fFi9x6PH='f TqΟ \EL;6fX͎Lͫ8͸Mo7kdL\{i" ѱ}G(ί#OV#8E#]Vf9|V:[g__C!IԹ>z!G Gz-eUG[*)˗O,dؾ.%膌:k=D-riFlF#h(qNAK[Ka>P0Ć1 V2iPuGL7p4RAT DD.@穥"( saײdS Co8iAf!&3jJ 6Wnn<A9 '0_IV@ x#; snib(2DX<4CPA @AV |J%H!( s>ub]0όBTb ᐖ KLӆiN* 1],+Y AEˬHRh,swB@.eU%&8s,da &cC/5_?@S|?n " ?s|YD3"HmkCq(`GB#<$mz_tN'$oTf"'_1ַT%Q%QbR BӚуtႷ."#PHUJJcڕ- o0̥*/+a! fhbd5E< BQK){ CއA*W?tT6qQeT,3ejlִcȚ Xk-k̈SMlh<,b%N<  =Ć D\#Cy8oe a2Q)  77 +12 #YX-Njh%NL t!Mh&1Avl3y+;!0/&D20vCOPgzwإ^MێF΍ kqGSto6DӮ ᩳ}XiP ͒gRB)}lzpǚi)N^{3T=ã] r 1-h598B 5'ݘ+lbP1QNLI9 OoN¢'^%AMx)4w'ÉMOtxo6{[aXݱ&r3;3QK8aD@qa^iR(pZĸV'JP]FKVA"RdЛE-<Fr\d/fLbcux({#';ȹp h' Ǖhh Uv2mLڃ*- F>HtX9)a$wrbb(oW A*L[+v.C!]:a֦Ll.Xw*ʼn _[~ڪߊ\C“X\b\c%zΓdU嵷YY:4Br*d2S!hDAd` 8E`pXkLNAY bi M6ĵ].)LEȤאt4 Za$׫+;݅]^hN+gb{syײ6]L>.w{=WA"kS5Q,PY3Y=驳Y+'-bҟclbڬĶ:ir,S!Z/"EZl:hÀ$Y ,%MzK3`vCE6PD|'V!o|V0cum42kl2P³`AUހݠ'#v5 &'La6]:^bQ tXq -skՆm6-Զ0"xn/ 0DE<ӊÌņSaT ^3Z^exOOuqZgBB&/Dc4,,L$TIٸJ2@,JS㉞P 3((D*d@Fm E駧,RHIOh&۝C݂u>&᠏]`/%mv'h$2@FE>{9ܦiaz仐d,v ӳ:;:,5"kEmfMЩB(s]{s[['-[́N0z ԧ,SȒ Yr:ĶD\ S ɽ\bIOb&^ßIjqJIe>Q@$6ρ60Iɇ{)tD#Jя]`f(oslRN#ܣA8J'# 5+Z.: Qbj+^_ .aPACm0$0<ܗklIΠɡH:>:qah'^}`.̯Dj>I8xVYwn;CmQaŝ=ZV?a7]a8>0z5Ptͬ*].QpVS3A"QDDPSo1`=5axu$;D4m ;h&3\ #2ŰQ3 #.Uu(8Z AnVuӡHxijd nj2,] OdߝȖ;t˸;f\ z!T \ŋU (ez<;NN~~6rCaBzruͣ׼rt}5 (*))Wd&Mr?; طɐtS+K]8DMJ)gOD$gI5FSJ,/=:7PoQ0uit]BZ Cb iPT5-xcKt&x̠vB%E0'ǝFCfRP:4<=tzx S ̰H)JR%H ijHhffa[ U$" 0 RJ B B  Bjk3(r"%jj*i($ `*H")$` beJbY%!V &b`JFTQ X&```)ޜ{yuW :NJ'0ŒUADi2$#pIAC RD45(JDo>Q!)@AEI*"w:"|s(B;o<~PmhʉBIyqz 10$Q)9z#NOq ΣaAX)mppIS 'E$M\͞f"KEfr NFBB3LmL#li3q,EQM4AKɅELј7K o?`FdXcBeH;n+ќ‡SueOE2'DQz0P1(+&, lߥ67a3HezIb)X|j['ѮSĻT jM- yQKNLO|L(9`FHyw;J8@C)pl [IV'PD4\TfHQ7nu M5r,d$V6 3 ⸪ǎz^͵Aq/&dƫz\ɩR9=i߻hI)k9z1Ma`0JG D`)fmhB6μN؆ F8`E dr @!QQ$G^<|Gg31eHddKf1q<ν@?"DAl;73q !샢?Dy Wîz\ z14I1MlCfj hBlLBvƶTHmfʆH)d{H. 
nD8*i9d%^8/XB(U J)P)6xUA*F((Բ&R=Hd=#B;8B} /sStxTe.4˜}[$MID71aR1  )UTp`8^D>PPR @P 8g7_>]XBFnu PfQ1DŽ[hQ7f FM2jbv7!ı&";4#^Ȇ`eDCuq:ZnQ9c=Id"T{{R4DYgŁ{_^9z| | UAFHd$?|19ηR7?crh2;W Ѿp;4 .lZ[Z#*02)σz g?ys9 @yHClM#>/`>X CQa:eUaPIF,WD0;}]]S߾\'ǣb޲ hpDFΗc.RdtpL9KhxC$oMkU R^b.%[Ij1XD*Z[F",GdW3 1cbij+Z3(YE+"TSSQ 4\L'HL 6W"N(0UFQH˸8<" M̹aDd˚Wc-Q9Yp`A FO0L&ʚA$uiS|GrbmqS ouػ۫uUrB`ieUPn3ze5c̿=~{T ׯπ ɡj _* @)6AJ\H D<81}yFzSK({ he4N o7܄@{DBR$…)J  z#Q@P(/Z; ǝ`GpsyM7H"V"؃"zvx|>)<a_$>o ^Ckp2d$~rg "'ch%!y-@^b@ /j.:t9 u NC5ƌ/tdB?, O)HzAٴZikwrmּ.|\Sn3-qJ;LEе'ۅͪnҙ>m35ʉSE`^6!=al3 q nDvI9獙ףIǭ(J^%C-ȔbSFˮnN>51]7eއ3<0}bj^,mT}m)w"Wjຄ* T@6cBx WTrBTg2 DCMfV^VYI"D K]̈́$L -{ƈ=t e/8QECYÆ"}g,;J4S4:"0z82 BWR"CFGd f9 2魃5}KH/ 1C=Ra a)*`G%IQt*!CSMz7qӥaRT!D1F̋08XbLG  }Gܞ΢sDfuJZTEpJ y0!B!i ^ٚ|FL%d1&6\ A,?\`o,B֧.n#.2bo*1!͔̗U ˧ۥ|A,ńPHwY|^/y*d p7 .6d sCf-?Y!P'CRHa)+qTF3!hkZ2̌.0FJY`KJ!JXT P/ )H>FoPƤɈ0 rCG(9aQKC"t{HD baJ33H) HAKǜX^vm|=OFq#ad.M6|Jo2'bV߮bşͺ٨ m.%vR8{f [ݢ|@AX-AQ=&2"Te4])2"}d/ Ae"UD Z.$DG?Ew:ɈP|MN@^G{GX&Ic;넢a7Sd w U pH UiA\- !JED)@:J$y 2d_ &'| . QPnVI"@Խݶ??UT"%HB+QCdCܞ %6~GN3>h~I<!M4CHhAX ʦDz@>>@L|3৳0( C3DM 3 \bZH'70@%("W T=C@O%-Bp=BP*" B_0`+BBqP9h#J+% ZI# R` &byqo @CD# !"~)&z~=ϐ;1#wCIp!LL4P9?>4 $!>PH2Pi_gsh՟i]_A-O~岀d~nG<EC#P=g9܊1/ޢd99H<`)SPTO{FX$"MҦTd&%)fndmFĀR6"9(*Gr3CZTT (vs0ٙ;4/7 II&H#4A)Q}z0"<`!ʰ(ߨ͑|t6A&bcSD>W<:uy`|/bVGჱс{6ܦ.A%(P!yA@Q%L 'fvݴod?f}V3hf9Xgu'hu6:SDowc >1PDĔp n4e`D(_ a3pM1ra%־JU 2@y~ mm5t$ 0{fd/$H*"!Jp!dZ!&˫\SԁS@D?蹸nCr&) HKB( L1Fu({8q4lu/1 p 1YI o%k3\cJ)cr ?44>V(%*)E D dW"<͐n4CY>r t9G'A!Ss )M($Yn,y^Z ]RJ>Qrhy7?~A}0hg#7 C9"Z*CdE}r )ދvut rETNATDMDIS$ ?d@BU8RMͣUdv[;ds_vw15:G>dNxpcHkln9٭%Y yq2ŰÉ2Z!AXNf(`Ie'jB7޸61CC$y1Ugw/"ցl!R#s2$\e94mNn)IDNѡ˗Xp[@&81Ai+aYlY`%+ZS (R҉X0aluagwrB""D=TujATJ&1b2^pqhyi*RU0+aHƍavȬ+p²  J4l;gM8Jq"#g#`+A+D* 1h8HpR6{S16tPmPĕS"c Ҳּ\XblV.ar7cBlR9ACG8t&09M:%d $Ʀw{ yAS*2\`:%CcKᚈlE"֤F#" "M[M#9*1a3|حI$>j6*ii;nW= HBl!Ձr&Ahjh?F#,8٬1kdZDq5<᠃#^+A bTD!+HpdÉ=2,XgzBLB_yL2xA{m57,ЄmcKf}ݞϖtTީhPP(ӱq0Ư!w 9T\>GAQ>aR "oKʏB k8^;F[䃿Qw I6LS'{"|"΃sjhuL J5#uku%z14̂ \w'S#N%X$|:=j$J@ AOEvl 3Q;&0?M ~d/Coy$\pRx[\?C\v+Wm\S k\JPyC-D d7țW]_"bn%Hr;x95b뉕ts,ay$/Wb-{rz&虜@7fxDs4iFiCvUM4bIT|$ǯ"Ҝi5b$3O._I1Hu9t.&:d;i1yS^g.b62Pz3P5`=ΎMd Ot2ZցFp߶d8k9AUSb֋SJ aT&:nrLC^(-hJZRK*@-Fc@,J=y^:p]PCtMu"('e#U;d'0t/9/Q2ICI!c%CdW:u&eFpC3j^J9LAv"$$yVKĀJ63,E*rq<`;fV䣣kQB%:NvC 5unδrFѶ 14C(<+@~#X^;uxM"LT̳*652³k0;iWs )%@Mx,#F:Q4ĭ,SCf&k7l{sz{sUR]7q8!a`#a 5O,l"Ņɀ|xs 0:HWBBTOqu$180 "4|l ϡ x0 HD&7 2D[. F[7˜_!h (k'TqtDHT~@ U("Xyxz\ 6 y(=D  BMA>m4)E4-ZΣa%!Pb  |(S q>&%1xll?!Pє4ЇZ@nmǫu8NEMS8TiN>]REGd0: *rރHZQeI I$iDRB"@f(T P)ThRG49/z+IpsO:tO TSLy.sF^܄UKTU7SAS& 1A,_vx^z R@g(w1J>x`KWvA}%Pk?=p=ޱ _lj1_X-{`8Ju+c*ІH > qjq΃ ƆqEݢ6 'K2 -,$Ip:!ۍ&=ʞMrvNCA~>h6G>$u`3s_DKj8R Z@j|)ӗmuylP}튬9Y Aqs-4,ZdO6 X߫7"*D/FIH&3xR#͐ _9ɮՅHL_.d0z e8;MqOXrbbV/^bDsYX8+A@HS H4A TH)VG?`7_ IO#WicgSEQRu0th~1Di|d_| P,>J릴Yt `B@1i?^} ` ?$<(xHhFv8$`, f=b@`(k% ~zDT)UA)4Pb1ci0L{4M[֠8:. 
N(XD D&*:aĴhA6QM2-P-"XDߴ -UHOQ[>٥ShwЦɗg+opc{7Z:Ưj?ɺY2)8|loF>}BeMY2{Kk҃hPX1;>ܙIOrcG:|oi(<0 X-cTco0~`sb6$xͲqTR_ ecT>) >^7^=?`ܜ>`@AuRΆsLGgn,b܋%F|5l6#4#A<^ކ*' .c$41xr3Tt>agRSLt[л+ TֵA֋ﰠmM 1AƇ]G1\ڑux'߉A'5tWfD~lʗk1mQ.1S)NX]9 2uj}R]Wd\tf7nNL*:Sb!¹GeiBD1@d ` XtŴ980lU2]hq6Ѥ0cf ǎ]*Nm&%0:7xAmn0 &ʪ|/+!@4d6BkP98szd'>˲Uަ=\pߦ}WJ=LvގHQ`gځkeXLp:5H0Ck+Ѝ5P!{atnߋ=ֱ:Fu\_K, 7V&Ԏ.þh Xtcڞhjk)W;!`ŭxF0:@v>8qӡdqbkz,d3׀URLLS*5V^۫lB|Z6# oV~t}C %̀Jj3yY:谌F'U+l\mj$߳oߝLt|ԟg^k)[|E,Ƽ-rԲ(5awVڜ ~lQ40PVIM^o((Q@UPTUEA@(3&ZjMZ**($=OX;:}xeƓl'=Z ˌy; {+=Ϯx wڻ۔V;hD:mfj{;96w+ڭs56jr;) dt(Ufv;kNj;sSe͖m⽀ Rq T6`CB{IUc( iUHVjmHZ EmRPPIX$7m>#0t}5yA6{G{5_m2&g[{ު6eTE U% * yT}΅REWSY&*#Z>}{]=͕vCǽc.}wj{n]++VNrk̎ Z<Ͻ{>ﳽk{ z>֞"!IGʋL ѶdJiRF[5UJȥ RE6-J DV[3mR k+}vlR*W[ZQ{m-ggT٣ikj}nCKpqT PaveJnUN'Gjk}x^7:nsviB*f:cC֛f֢=i=j2]ƭW9O}7^|Ά7uzӵVU>I#P% 6J58gTEJ) .}}4ڍޥ_}M=Tk"[n}-BUH@R=ow}@ۏx>!Wa}c}Q( Uި|vԊuN]lSB HW;}>=(A={%۾( J fl}z/k}_45F\zR" AJ}pt3{UTFh"5@귁y1U)JP@K>EK Ootkqwp{[Ϟ$חNz۷CCJkfxvpwSUR%Hk]H՛(eZT[j&*5Y* mkl%JiB-B m֬KZP[iۢ"UmYljU[]ڍB3f* %ٍ:Zqv aMVmӋ3hZueՍm$=VZ53ShfUlI1h­jխNJ Ӷ]4QM[k]e*4tжR&KfiYnnkݙMk[\i%LmEfE!6ʦ* J*[mk-aS6$J k6U UJMIBU ݻG,ګ]buMMٖ]:uön6gU 5YnUowf۸U{54fF,KZΖfelѐ&mlr˛lmR2,NCWuɤ"FFڨ+aPjv Z&SQhծm6m)jk MkUiGYT22Dݺ]ڊ&լtӻJյB6JͶwne&6]UTb[MMnY4,5ȩl+5jZکZ&5[EMmաȲe-Hj2F[;(ULq>>% 1 4L`ddѣMLɦ4#AFB(@i020CAɓC h 4dE2ɀ&L&&2hh2hCM dC2hM hM4LiA 4 &&24hѩ j0ƀCA04j=4L&B`ИF4jm)LPzhJ{SByOi574jM=hɂyO5 4b)I"F 2dd2 he0L IOe*q(DP(|KDI$$I!$HBP.@(kIăPx8Jy;PRCuV["dT EbpH^枧<)r~FslF ؛cM!DO,B]Sc#*n@qF\ɚ홫Uu*'AoWm1.mQAH85= vI &pq&x@DSIQ:U 2"뀴0k k^1@ k8.1$EE‹iX% <AK+aB"XEhPM P l\BEU!r&o*ЈňjyCH151߷+M8BPzHfrPA7wzƹTU(R HmBd]K UB2 F1c$$!B" ȈHE$BHEIUD &V#IHF$#!@ AÛ.kD 4Tnle(P RTq$^<(Wz+,T2)iȵFR C" %XcmuTHA{"[‹Eċf( `]H1*"BI"@EB9EتPd6mSl nVXKII$! D Ƌ R-Q' ܋0"eDh$@HDFAPH HEBA$ *Ā1"&q)",H+*(QJ   0 V(@ dXbؔaaXD`+!#B!$`EHDdDH+b`cB*!2 2*9Dj DhX,$R@DAP Y!$!dB+ V+4( $!I$"EDE8e"`6 ƒdI*MDHnaH!b" ($*E R!Z,O0`@YZ!F+bFERn!()1 4 a azF(B"JZL1 Zl5(k׵8W.v&E, PU5(D*kBX"!ij&t.uJ˽0BH@Hhj;Pцy]H!<~bn |~[Jjvp+(̇ qbis07TGZ<{NLNX[NAį!Qr%(%Y(^|4 $ T*;w\R*vuB PP)7!ҩ#kφYx u,uR# \"B(>x # (e‡*eЀ^+E PXT՚ȰDP ' @ Yp(rP-*p@)-WyH(",WU`(H.Q +eB%-ATB$! B,`b0P b X xϐBLX`FEa3V2 A+ $ B"D HHV,V*BF '^@`^RD!{%8Mn>jxuv7 ➎<zjI$I$y$I$K k0jE0)@AA`0Bzi䫻x˳ I Q 26X11$G6 6Dy(A$0!s ¬D_bZSlT87JRUi+>4sӖMc|ggH_ sf哜9f8h% X$.]܆YaLvq'ؚ29"ƼR .H)A-*(,(H[R@+J[e$IU1K Ii(ʂ, ! \vJĕ021Td!@!@uUR|U9XJ@$@@QC~)* DED`@D"r'b90郔71DĠ PE,B X@K(B"*D4@A @b@ ٮRM&Fwr[0.~iBtAdfi-IPd"x㚓NĄBoV-jk+Ԫ+%ڏzZj>OH*GPd*0T]_L}"dʚEP[ϴ?+7 $GZʪF(*ةf)(XTY8Rާg~J[mo!1rVD')/?hCCYm_ ߈NroStF2󾱽<6'x3m-| j׍oq^l)X}zmȾ;&$\!275R .-w;4fwҾRjוz`1hw/vvz20"϶Ox2VuNd[cլ܏iO:mRVS;79kLap 3ng67t 쫿 qI[v,ݶ`reVз$;{δg 1K9o\69u*&;-[?/JM ox,1m2{53}|HA9M(k4ó\IE0uFJ88L :xBWxg35{Sz9yۑu:L8ofz4 n0\r_M79o^ 8, |(etRnMy.WXaÁZ] %x8ecMi91vk頲1C~y%c-vcocc7v[\9j ^[j}~K:s8wn1y.}sωBct5xzWٿUJەo;48 ߥFOAa椦$q`_yؿT}:n#Qc0ܑidq5gK0i=Ӈ 8/^BqcyՋum۞zu>F ?c&%&VGWѡ6x~*$ 1\ŞAb1Fc+NSKe2p=U3n]zx2xw]o8wӥQ2xzݷq4iڒ!/ <N\@P,@,P ۰A0# ټ dQn[ !kŠBn A@pZ B"AP/"$!hD@$ȨD"ytVTUZ ߀Q\ @,EAJ"")@PAn @\P-ȋQ0$ XAQ7R!z4"Y !EIcBR,  t@h1J"P*!H! !BEH DAAC@9x_Aj0@CXXJU1`  ADB##B H*HB P@`$`  4 Š B@ PSQH0P)DAH0p ({P%sYJ)X %IQ]aq 2Q !H@)@R@HHݥš**0("  0 ,1 2"*AQc@S}@"H|=}_MP8x/7T)> *+Rt ΠPIPAAcE=D98c>V X{ {oHyk(eyF!F B$ iHAbD=1NcAv_=4AKg@;*#("z%rs2 =|ت\U43KK^TX )Biu>߂"& =TDKus;>2!@G'kp*JE = ET((}I,O<͔JbXyn*2yhsڮ쓞y枠s,xe' SW~$<{*?~n54#WKiN5Qy\uπ;@q@ E   waK瀆 胾u ;v'}cCOEwoKẠe a'Y:~HO szy}!yt~|?{ůa=uv͎PdR{b MoØ1%q= 3;\{d?(ofs7'x(CNi?` -V tR߿&ӵA;JO1Xj-d1܍/t}b"+z`||s}C|ȹr7m7HXV |dS&)D }!" 
@̱uw3Yј)lѬr[GV@!Ba{ł}tp?Ez؍׍ D&ehdfhwh$i13&>x`AXr8M;W3wz#,T≯ }&c:aG2ۍ+68*\6V9}ށ4MA\L^N"PIzwy@u{'x?)ϰOb 3!Ո"Qo-B]XK|Q%AAEJ2&_dӥNT9{lUWI `G(,(1[4Ql >7^p y|ۅU@ u>ǶyLe|?Ox"eTXQQDp_/TEȢA ! v D$@ DA.Y( {(#@DU'%`W**Xkv\ fzCܘnoU|C0 layzpKLiB]ON2G۰Ax]ʪ2;{!+1@ eYY$?,H1xNϫovF7N}ƴDžXc< : b''(LflG V/OӪdn{wv((=gzOk<`EnpzG/77~y Bc)m4 d20pXt:MaxnPƱx|Ds|Boєkz 9p!GUhffGGG @),Si%-!~8QRYP}):<3< ;P`#@!PBkd`!?)^/!,!+Gx,3"Rh 7DL;WyG7o}k('_`( yl" Ȝt#N;ɭ$!xm.[ӰgO yOh CA'Fv !K#5}9.ĉ=2/趼(-"4һȥBP<%mQ7tqqZ,B)5թ/S)=FEYxU킽UF]c/k7dIĨgX_A-d$xa oOFv?~I>[nƊso*:0}HLm^~:Sݰz(wIqs\ `jl ”IcM~TPF`Ebd^ ABH' X?&~X"(2*"@H(U@@F $ 0(!\EAH`2!"0$A|DccL&DOy $D$DJ!(A` B)T$B!P"bEDD TA  H$B$aA)"$c`2F#!!D`"$AAdbH I$"$I Y 0HQ d XA!@@x'WB/䀊6+r/iz+f^Sƞ6Ͽqx?>wR ?װ./'Fxnq=&>:Cxl:|7b`Ã}.xP=gG(ňO(@ }8Do`M D"+f @W8|\_We?^AI$.7]6i3#2;P䉆Rr^ީ*RvEx ׸.+|Xue҈)%WpU5@&h٣M$;DQ;d Am_9Tukox}u^;etg5~Wz,w򜞤o^n:tߓkͬs?uՎЯ3q"RQ秹K1ͽ.c|ː '_$EF"ၾP}y2X>Cq(b" υD OQ> q0n\Bׅ8Q%!eG}ZV@!xzLL=#Su66bS[8̗~ok.<Nfȼ!3Y[_x02 5ba rg\ q>/H;bΨ[EJepY |OMlU(UM2Jmcc&$F*\sM4&j7hBembkk;^| N 0f}=;wuuH"f;e_=[I-rPc |~Ѿ0s$FS$Yڑi;{/jʎ;cGXtQ`sFx/LՉtm+s6[`D;lXUW,E"/^Wbp6 P. AAP0+MBdE31=42 a3 qg} 0B㓍dra"^굠d{A@pg@@]ob-Boh+#0T^q[q ա}ToCK)CbS,xD>hfb R9IHk6GOfv30|Kb~ L"vD! T@p3v_Ikk@=8(3)N8kY0ޟnA:Nxg0K$#yJ)7}u2R9_ cQp'ng+&!{c؂d0Ab#qXbfQ435 Az] pǴj~Z0$*n?<&kҞDTĪ'I Juܧ$q<)S(IB $6q|;P0sO_xX%,b'QˁĈw@g2nX?TKГA[G+ xN1vX~z}\_ɥ7r(.'󗡂H"*~I$BI$!+HB$*"*0 )" "(*"ȪH ( H(  H(Hȣ"0 B !#" 2*(2HB( :׸T!}t0|PW"A uȺ`F@d $bŋ 0@ HȄ"uDHHB*0ePA6!D 0,H$" F 2@BE "  (H`H"*+"DT " DD$**-@A{R˯WSm& x0 h5Q:MS ۅ/.;+PAZoM+C*Rxnby  hM DC2t Mc j7:XQ' X&j|#Q5C_n+apRPshI6quذdl YG`bs=dfßQ7^[ڵ&\c{BB mZf`U% -QolkO*,E) 2IK5R:'^F'&G6ƬAFCD9C{7Ӈ֨* Bt2XYwGߎy|2-D0"4!Jxyr-ͯ)?D5WEG} }6CJ _yCp7"H Cv `. g0 MNKpa aX._lz 'hA,{\shqe3қq6Qr_LxP-DOo@8 my8^4/*\vL2Sş{oz eʼn"-R> ;Ό7n:4,Ga6#h1+ K@96G޺Od?~0~(~A:>C[vcB?j{VAasmB( }h lltb^x~}K=WT)޾U5"QJ.9*Ny>ϰƿl V|nұ$G37P^F$[AXdgGp_Gw&;Ց}DGQ{vOAxQv㡲*ӯyn  |SL̫Sw>xtT 1~ b>;RTo(U<=R!CF*hD)󝢑^"k~"[y\4 B ".Z- TH"ziD&?7_A?{ 3g}D{~+.]iTSEP{,fNESV?HaQpjDW"5do;:uGOYxW|냽c*a}ʲyPVKorhD<Ca>1T2G=gp8['Ѯ/} {<a@;焲o`VT\g<}B =EE&>e?Ϩی Ñ٨8eoshtQx3?QfLxW۲L.g^oP G+ښѧ3Ad:PrI@!iɀ|X K w^;=W+9fnLc T}h٢E&CIULa< n>nܾt=Sn7so0]ka/tܿ{'!U삩"# ,Eb""0Q yxs3m9x|{܂)zg uHhߟK ,*|^VLE v/=K CzIk`QyY:_R}EOEeNfܪioS[mk0\\̟+ FvOwYo̷'<ݣȐaIf݅gCa &tȾ \8'|q_lsw ~r<"%/]wĊb#B_~^S3&nYmVc'HD7"L0<@~ !oqWȽ 'BP\$u"?lⓏ8A2_qGB)R p%G4||l=gb?v=n*d`_ܭ6]Uq!D5jqIBoΌ2JQ* _<1q;ef';gtsK(I." ~g_|\jbȲ, "+y>-KE$x8[q0ΕаI : (p>?ru, #=@T˞[5Q06hP>OLQ eIvx *!juW! m[ZgXĪB|DNKqzqH=ZY8'-SU},L\Q .%:>v?L/@mhD@k0"hS)t svXݵSQswȠ6d`:g@_ 0\^?s__~{Qk}5on)_ iL4ÛػǢ?9t\ cy [_?K2I@B_ҍʓ]w2aׄ6ڔ B><$nӖ_FAZXSD+㡞O 7?t^d#hE9 R+'5yÖn1ݒ( ^xXy6 {ԑŻ--% Paz9IW%;<>Pz7ĴŵcY;sG _Y᩵|P A BHm>MC]pwdue% ݭ)q 'f7ߌ!yzt@E0y۳eZq.Hccc!F>GT{`Wt4l ]_է2L~T@x?%CmK'Qޛ2fA/WPD8\v nZB1z!mҫ r uR t,jYF^bB/c{|xe.k,t~nFwTW2@@GԺd{"e04r Zx?nѣظaAܥgGĀ`}MW}Hu]g`nuBv'@'k\xx~>&O9pϺa]wn_,? 
C  zuo t` ymAxV@wplϧ:<noH$h'?Uog*sj(ekĞ}yY"U'z@#Q';U7Ɯ`&_nޥah< bCGLqbyLܑa3>{+wE*qV'ſw_c9[e^t>{nTݗs .쮞JkvY .$@FfXz?R 5X^yl|̉no&Lf}!z᪕N2'W#\+)_bokXtR )$ ,:Ŝe]@va  ha>OmT;҅EDQ$G7o @R(=nQtTHtzC@7#$|>zϢG5ޣQ#NrXcc'gs+ Y5Z 9 ?z|9ڑF ȺEsHja}b]\t'a{![o0mu,=ſj.CمfSLFXn*?͗M7gR:x^ӱqVlr?I(8݂xoH&@`@iV`|fYsr( UM./m_HOowk w^ 3f Td@I}?¸X9e58{H!?t}(/Pcb&i ¢[Dz׬LVq/~X8pjizwʚq?LI9Hen"9+1K9hqϙyaB^?NbgۼW9=̓lo}uVpBbfc}ß\įn33O+~0Y-; PP'0oG%z$+=Wl$7ygXuуY߶9 >~gs$(<Uc>O4oJ^ "(b)B"*\'N2O+x'w, #$.82IU5qI%MmH&=^ d9I wۿj$!5ډ &7^psqU(nze3*"u-lZ<?|qvż@0䰢<O.ux H "QMepDm%Pr ,dSmHDI&\l0дm[Ϛ |5d70MhZXbli6 /4kTiaN\mnPJA,\ l  PM!460llDgH7,5Q`0vU&p[lM 2Rhe$2p`a2 Ŋ`c`w , AdHy5;~/5HcWkɸ!=(qMT@ㅳ5M[kx9cxe9&}O$,0!Kts$ty,bD1mVO1"Y vYk~BpA$'I@77WV8 8lҬf]Wpe !0P{{3mvd7H#`PJXgGvSPr3[˭h{|@y"k*s2WqʿǏ<#;-&=}gP3?Ͻcr}lȷ:WaG9=@Zv`.k2 $`0{, *r< " W+d9iEWt4vͺ=&4)8X~̑v Zms5?sEض[{7$m#P6=_/\?9OD&?59a-Ց\Pq;xTALosfXR?( \/(?|>g')wMUeV_Odbx|G{Zd[=w !īn#=?%is:tJ |Hh1sé<(M+5ViQcÖdri_$O/MUbTg+֔`5}vYJKu&9+ k\~2\EqkZd Μ?`mzmvL V&1 o郦gI~:U 3c^c,#BRGyV\i|>⾜St )XriS'Q+JsF~+J1hW>xżċ 4^kYhEqo n )q2Xϙ)j/؍|@Tt}Jy>}=r%jΆh; ԣt^yMOgAByҥƤBGڙ#$e JǣCl;SҽR0s~N}g7 zCVTa[XcE{'xc 9οˊGd( 7QA'ҳ?#94B B &m:q1gә3b-՘78cp~eva{7x \_*nJxA4@Oei R ̂ 2ӄ]nE`V?w`_0A=#"V,|~rh2*YӠ-'nB@zNiL͑KՍ 0C^_W5`j+ vmܙ̈ #?asD ,D>ƍ^7QzKMX!Z=_ص}0!/bաZ~fm0ilN IhnoS֨w&S.6قC5kRMn#n3J58 8/4CPSdX`,b@ɼV<ϋygcxg6vN)!7A"6D"`@ * B@H+b2*@" D@˨[8CrO4ZW;mJV& SƪJX*|;昪oԀ- `7^[M?(DQr> dѦ|Ey) _jRbC!Y\߇xwtjzXl-}o=V2\]َzJF 7 bqʅg7k5\HNkZÇM~JCaps8ao/C{.!{ #v_ڷu5bx8(gK,wT- ZHJLmDюMXb/ ]'["Aq qŦ ( '7A윓;2p;h(rԅJXbZ?nEۓ\UwDNy} h *aDV2 ZSmX`՘ 0l`(H񖝴Ǧ5J"phDM :XoY+?빶ޕsLȹs`=9?[Z])JfO͆&)rA 5xЙL[б{ 8i.R Mi}*Yن9ew.%wK g\D8?';l G/?tŢ.9&3E& ֶ2KWiԶv]KCxEK>Nlo?}&WXwX:tD^[duaqNDq#m_@ xbj1?;hHu)м?Q2毩-I`!=0%?ES@(>ƈkLϑ7$yU0MJЦHvRM;=LĮփt#y2.(;o9YUBR'6 v;}.Qm{jNBjt|m#a1wke o4w;b=]hj޽}]\bH!XAX 7;'Ao?;wSד;; Y|W>"<;ڼi3Ce>A"Iã%^0mnyMُ)BpGFcW5i7Fx{qݤz~jrP1D:p8%5Ab (((*)"DHb$ 1DB!PaRQG 0$"B* F"R10)-)((bP"!@ hD(H, DdBR@RiTEJR@E 2hj Q@ i8 @I"MH9sH ՘ .>9%B=C/{! =V ﰺ{1lOJTqV̽B}-I1/.bT@bg*<|*Uq_$6΄PDU- @P@9*[SN7|="lXs<$<{ZO[?ϵ;kEڒ6V:ӺoٞM&pƠ~N%"F~S!NsKg*ݳ[fރJ6 G쓕Rl}d%d,d~zVr'dQ`Ds5ZX v"?0maXzK AWw^@A0Q &~s|SH7[ P+ o BEdp>_c^J>nr0#_H uƱ4% Tpt 0Vj&K9QTu)-~\ZQgb~\j%_v81꧎n=3Ϸz9Y]Tu>دzA$ҘO<Ջ@ oڮS[!c(}DaF"Ov}GcP ܥ|D2(tm?CF/ꯒW#V/ÜkvRm[{-|ke ߐjt3N-t{$Ni =4T\NGl&ƫ|zQO3MCpIZKL ˰wCJ>53e%~rШhi%GO-0]P <qYz|;+7NSAH=oEagBg6s-AiP>/#N`R߿O."EAd6=ZJ՗K;[2qul^)u(e~~<~D5aԣ9_ I'V_{<~EgCP a g\q>p3.S* 0E4ñw<<:-M8T>7"Y79uoau;2$kX.j _w*#>/~q4~~>Lh  p~0%h YH*T$bHE!%@1gmϝO_(׾z\B)JĮ~cġCy@QԨ Kl#1A/z+.ID%MM4Lpu 5^ z6ܧ<:fmH}Bo;3K\G%I&N?'= =.v;x$DuHĞ.DEk bL(,`3+z&o^'o-pt= 9%(~.hn1g ش4Ǟai}m)b~σ9nyXy@oh_osq ȼ(A,:@vY(3.XЗ{lU>q. Fڸ"v@CX?e>㘦J&_3'7h1[ݞ#le6u k"/K_KFJSdT.GyV։;$s(w7ν"+0.4xu tʮ~/WA|Zq<6=UTOԇnt;f w*[ Xf!Sv$ozFM rІ{$6zxHz#U)kR q]5U/S7 DF8 աQ෵H onCȦ5\JS?麒oA 4Dkm޺<u=~pwlm *eKvƧc&LKOF#LoCl%xy%XF2?l?-Q}:pM稾~?FH~OG#-K{q2D 6a uq8lOCHk*kHΆvS/b[+괼YM}|{ߺ|>ս|bI9~oZCj?G'h@'-E͓KUѿߋ| K7IiWΙ^NA@AT&`AW8^@&P& {i 4Sz#}6 ]>5EeCmB4fq>w*8 _6So& $iF@#[ ; k~)>n=߫=壈"OZ8$o_NVKQX:Pv7MsbJ4|ͬ_,RGz<ٔ6} 6)?c#B}/4{MV(2prKeBx+UCE2@oLi׻ӣclWs=TMAX*  +nqX[/?oHџ -G]B??w*BGIhSDUOX˸K?wM[ZdN,9ͤ}+"05U.X2%fBd\@Y SvoZ"}BY\!l3 Zi2݊Gij ^]ߌ4CܨZ1aʍxъb<z94Ȥ~\94 se@亴A& e&[2kG6J/g4 "^BG.ڦk9 lͺ{sfl} \,p=TYe:*wE:̕@E_uGsw|X׾'|O]p<';ί1X@4fqiGY ?u61PwFҟ!,!ьw?y0{ֶ 1rNB8w).>fʼn[Z9!ytS8_JVfJ ޭVzN,%mz㏨$KXk6o3x7l9&T,&7j ,sǡتKVUxO*yu9ʶO-^VS]D;7i画ټTۥ]y{%D}IXf. 
=UO)5|\pLMtegL3#F6 ?pA6<*QG}A&gՑz]ٓ %?.pW[on*"wf}gaOĂE7P.Ap74ҽ/q7,uo8S=Jg,@TUkWw5"Q|% ܿzi Up/a3PoR?a2${ Uݍ]a-# Y~HgE,KQ+1PQA[cOyb  P2eFZ@B/4w_,C3L^YRo# 0$gu)z`הf^J!:fL$q 쯂 ue%6S\5`j($xU)nfekTc1'9p* ZFSɴ&s~򵜵6wQeb[Ѩ\8矈o-i䬮x &Ltw;\#BTPX0#&6eLN~P ?BuP1n{^1 %8PTeW%;(k9ax `#5D (u[Hw)2GtB:v1”7܎L|5Vh6(i/zkQ P/Gmj>8m󱱰<--k+TʢI$Ђ y;\;y c C@ e&M[ϼ#fH1Ի O?CRZV 98i"vZx YWg!2X4LN1BWSo[)DG58Ѳc_5{;ڹY;$_h24hɽ Vg{2df.X.qlXOmhh  >tኬYW IFu7f+MU6 }EAV8chU67>9iz]K\ ; <Cp G()טZFu|A+?iX`RlR/6@\H+OHh =F XVo(%(RxĽ/ߒUx -$+TsT@Xx,s4wT.ܗϰ[șDYs;~u dycG4RV)Ro!vt9!WdQHr "Vd2:c Cc-7_ȫ6x^]۹EQny:=h64XTciVZlܞ6ǧe*at#%X^MEŭGӴ[#jky.Oю|?-}/*Bɯݱ|L;l3e_ir:AKnmShg'EY }̙n <8(ٝa~R`kk>"51h5CI&tu__9GsR&pnW u'E# D$RR|?,}Eh48t tK k0/dlZ ɁIN]xOSxz2@Eadg\H|,T>_TzӑY3惗LWbGH16ߺP sjs ~cL*[{+KsMi\>Qabp2r`jEcf @Ke_}gw}v)ɝ?sJǧϾZ!6#):!]?TJ G' ^޾ Ն9 5N\6Cb=gQXI. f^6Zב6ƭ/H0i|@/Z|J;D.g%eseU9:Gp}1cfƨz3!Z.Jmu9-w`I;uTqqQםI|^dvO g4tšce?Z3_=懆"ߣ J`.;^?E=FKwǼA\>w?XtROi @%*/aՁZ%n1H4ב]_w'k)xXتVs>`_;5LZqEP-%X C^7BB͆ˬ)XQz< θc@r/nKeV#- 3Am +o3h2u{$짻sn.{hTSXSәNe%BZI^F_G"FFEY,V݊aюێ3x:PwkC–A(X58'fVNVtNf84k}06#\z*U1gm^g)e6>; G j O |#bcg|]PlMzh~2N-eC6]o 7sYd" aU( cdE[y՟/zN LE7jO-Wq9oMeo^,?<@3\{Y>kv=hH)~<~>, KKcx_zxzwJmx 7seJk(4E6>OZc4J3sz#y~I=O]s O XsEbAu{߻ӫ]֕Dr4*L|̓_ǟ?ss݀85vu']  o E?dG)i9E/eQoz wnȶI[GGi^懱 p Tx*i#oX\gc;~wZ3;H2l]hwG^ Xii606ל?P.gU@ ؆7VZdzD%rkvJc|m}-|&Q)m"ѵu澜֗"oGedĠ&eDOʷ10o6= [\Toz[':9"u ΍ :90wV3ﮛ_ٽMz,G㞝 oPD1գG*q%~>S dwIJzE4=T>3.ڔ =n4,}r{V ].9N=kSsjTtqăbÅ47IK  e9 V&덓SzcI%6?f< 7+h'obUܿN^%їwQugxO ü鐪ٿɇQmjy/5"gNf&?5ͪO`{z]礮{y.Rf2l'92A@վ3}O.T\n-f#`ǫk;eD{u}0s*ޅ=)EMUnf3u=x6Hաl׾;tTѵd1._4Ӭ,2*)+?Xv r>>ک@{B|`~h`htKR2MgO\"G547e]cp G\9*y8b2 @pKsur7Ws.mt;m %% Q\([EkϧYz "0W` aԧe}:煭p0"ߞ ɏj ۽{We6+%^yu۾gdY/zZ rC 'ۚvIvNkEI^Ly(ItBW6a~uzqB)̙I!BA_LMR)fnQ_^ \[T0 7 A~3 J {Hg>uP\`D8geos+֒p.W~063)ҡ P_ehRެ#^%ak[UYNfD +GGo~{~ n"fE&/0[}}]G )]ZE<I P(H,H]̥16 X+ /ue]F0T/)[ο e6"& [yJ[1Wqn][]c ^/ +wc--1~\t-^}c] up~ή-X~]&K|=]|6W1LQԾa2[c6;ɯ[Zr5HWz{yU-f-kg{.P۱ʵ`VWǹCX/7f,YZU\ if.x-s`ة±x26¤/iI_R*3%K'@ lc.~;W,-;r'ًx,1zB<sB%8 4%1B>_1]mJ Z4^#">iB2ܞkzZT遼*`E6<%`EӯHWR#!%, HH0g Ī]W21p956f_XCm9$ڼ&8U&_z+N 1awV+oT, *bu0tn:9 E9ij֦e'㫰g- dSޅQ,tWaUBs"!99qKsʪkG E, zv(Ȉ + Lॹ!_݀IT/̚ q+ٰ2"!ehh1$bA.XD/O`O»8` #a[L+-n" Y,-!_(5O [bHpgbq856yAx`)rIB6]~: ʓu1&Fjn,'UJuV6|{+ώm b0Bpe_l-GymND6Vr=煥09= 7EPMmÏ5/\څK@` T[4 %c@=^^3O>ŝQN>wp)TQC(eHNB ic DeF a\*V\AvX͂m㬬^Njv+,"#<;  i1iF4H0"BFJ! GU) X(#!!c$eQ:WU:YJC׊%,+ݲ* mH $ .q^hi0F#@%U@k.+ ꣖n@ ¹,(%פ 74~halK+zfMXm\fXRΡ I/KYA:E_&j2 8KK,)ΓmFT(p8Xнܼr嶡_ZW_k/p3 &pܞ(o7|'g0lsh汚hS%2 `ɛvMmIHCW|?p'J{Y,Z?f `$I FBE8F%MPzɿz9clZ:O~Sνzʹ=cOcMHGxC E +bAc LS$l|E$LjAlmICFG!MR 9 M0 6ancțC"$<("4 J63-A F$1}5Mc@IKi~mѽ}:rB |_w[}C#s7AaV1T[;PwxU61~GbB1Ŏ{4p¤1aiҎy,%S3Md28L%JgS7<^mXI[hD-Xoqo`P|8j2+3o4E^KGY)>8o4!*E芳%C_] %p @[OSŭDAiMK~ ./ဃc;P y79"%jS(![02}|,{:6QQ[~=4er"E3qWeߗoyL6 uLWs\򴶐 sdK=L(?9V F/p1oҢ_ڣ|-)GLe"+Xb*RQ`h `&Ӥ 4ՅZoFo:hJE!97D?XOBDO}Z_wpܝ{ p}l$ߜacj*!qج ^.3 - ̙ OimФGf Cnu۞LuJ8׻6y7KhdmTƹFoeyջ8PtnفChėP9MX8O6@GN)J$ (f&f8iL2!&ŭ]VvWM=LnF:XAՋKE0 :u ժYVDh J^% XW#Y"fU,Xڒ>'9{(^6=&ɋ 2\*iSC lCFI4xWo;"{ˎ"׿Z++kik62] 3#ê\vw[g:vy'qΈͦ G3JU>(xfdNjުX{h%0fc,D \(vglbjJ"gA*B`j 21CBdgEٲx=s.#A^4 ;t<94d%UHBK YRYZ%%W DKA<`=^OX8X콳cl WZ1zCׇȥ,dҎhho wdU,^:oCդxQQT]ʔ$=]z.b0 HBC@Ȁ}dwY9W+;xA><^ qe)yF ޽-78mF6gFߨNw ho 8x(<+]-)s^zmӆSdplXaz۪ hɎto9{ڞc,W  ~lbDJP EkS|k4ʰ@ !$#`1$0`y#7CH1ph=n1!]xwt:5OBxnr\D`1&VfבZQCDVd 05&#C1(PuDI8,bb 2^n?Q{@ %`XNp4 D \33#<%V.!2RP)P)c Ya$b &dd?}Vme*U9@m.rL9oڂ[Ȁ4:yk:wd%6(`gV^>UXu.ƋHUJZz+O& O5Gx=وq;@q _+fhԿoBcRDd%A2a}mN}`GF2s`j-&mc3aUaFxdC>֦ze!Ec7ev+ ϒY0jz5ee5a|WH<5=N+%^1`ŚxT!# `9*nD=ǁ ò3y)F֦@,U@a Xz3H2и7LZ/\1Z|w f9hSh1 !|}\Lb̘*QDVoqN~>mt!XϬ9wGLmհ*c_c%9Cydg=4╓cI9xʅw5\ /?g=YtgCXQd5H(tfiU2'0FY. 
LX|G=jUE1pQGg J ^4J!T0P0-j4) DL3EsE0 2jB.4c1x 2)ڒJ+A*KƜ:9WƎڗ=8Y궅4Ob¯`6'QF("M`( o}]æ ,ǁ{~KŏnGg?WGXu2ASQ_um4:.řGB2z#G&bO8+b.KcԄl Sb%CRfVmغ'lk+l@T0@a9L*BxY4VY"P sas"X]КWfLQ\kM9g`Gghl\fTe+2cY YW*&$TzERsd$m$9c/vt:jg)uIDp|`Bݩn3p<{tinpdc o}ʞD͠l`gpcVSꨌ!!]kiGP2x8)e0q{q|T(Th`Q U FӨBeܿcW0ȬEYB" mbj脑 >Sml<–ON,m#'q<+6]\lȕ B(so^ 0kZ6҃5@aA *c-֫/Yz5Zzot<g}gƽ2V۽]eưEw x{|>yӬN" aӞ8xy+u0c}GBb0FML#kANh ER BY D*Xa㷃]&15goEL̸Z 2f՚V ߲xpOC2 [a0iݕyXT},sYY ӊY @m!69:ĸxi,kntߐÆGcLn{z1ӱO ,=vG^ݕ ;}˾6e9U@-YK9SǦxk9![H8 lZqO!v&)\nمgmUI62M(ThMrF.5CT7ݰoMU+ cq3O;{L2o,/YYUԨe+)"RdaffPAI$Ѡv6LFQ@4xO tǷM6ƥnZ=iyvkqyw&[&4 y 7m(aLSSi:ϊ$#oݸ︧ԻLXh,$CO\XW*B Vuh!N-^Ԡ{20ksǩfu= wYKƹ:uQ4{f bҤFu2{a̓g SDj8{E^u|'G#\k|Ytzts[ƶxktlAؙ _5V;m"-"}@;cg{6 nblmɑШc bysJI>&<ň@P~'#ʭ!2'3nB*eపdcGml|M NζihPZa`;hUsjhM3oΆtԔcՈlAiQJ1;vm69NVkϳ[kNp^"vf ^X( ?uN:ʬcc& "tp `cx 45  :(u+k!/ǥ 4yxRH}f/Vf z{EO;6όSЍG=X8ݴE'ؖʆ7uȒ;e?$U"J`*xdEfb#Vqؘ01$GfM]gash"cC.x#̼񺵮6[jzY״kQ'Dz{/Mآc*umljV{;SQk4Y{'&c^ ~¨ < "S僲؅y}J_ĥ93od7KjP=:֛Bj]@!/·ms7X7 ;ef6GuzFpDh%I@J_koT^ Ü9gS$_nPe\%2; DZ su(@7A 9Za.ZA5`kQ+6\j4w* s4LHΙ/fqZq6e}X9rn}#:.KŠ}q̷\\9x)"S7D=9 l r@ ]p.͞ 5~kQK1f&~bSv jH A%:i`ڎF* Kb|S:=j8T'M Bul$JfWWJ`U*^x@k@)J)L>@,CLwM,p}m-X59L 'Oœ;=ק;NI#0šOF2[}6'lׇ 0(U'fnxkt:;s @7mr&3fXaGS:2؍@A&aA1مS!~/-;4W7*ojʣNl'7r+ bs!lH!(N[byFbz̯? k,FXtE 2M18q9Ζ[iO46J yॄ!Zi2QD]6ry)矰PU:~{q P0M#=4`BH3s+,`Xju(1 2aT`PL2͗'׺H)XNv)\cG#Q%!F?s햌S>_QjCqiIO/1."0/ZL./[Yj)eK4t i> I~ߑZ=3;uanGbc oo޾%oq{Cr 4Ea >6|g=]y|xc)$Iv]JbO85$i>,;C_o'sA/ߜAy!{a j2Ig^B}kR?9*g3xULA# uAj+\eXtA'8D4}cm1r@I 9 IRbt!6H-2- td2%\L@B 6̶]6!0TkG67I $LȆ`vm u=^;`|o5aHHHI!! (6nHF1, ɒjeDBT#:r0} I8ʢu|k`_΃;{uQ) 28` 6  UJ4qg\xA3ڂVW'er;-Ϫ(+Yo,M #:5 iһ[R" >t-GB-7g=\V3Ȃ|e*n69kyHZgǪVp8k G: S{c` |-DS8=_O ]“G7& K`/ܫq.Lbb;YIfT3k9C5-gtZ_n}@~Q ]gzeydOZ)ovۊFBW.?FȿV({3rִwO8qa>a7AIWcT#}Wgl0ޗ4gC}֏6'Dž6{[N:Q D** ZfS(I!xI$C>e]S߲u$ퟯ/Ǩ^VnCqxB+TIS a[+MYT e+ְc%YNQ<ҍˉN+^) AR9_Q>3"bW([9W0|: /1tӊQu(4Cw0!WC {4{ޮ#DϑhR '{gw #`l_ØߵZaa9>-î EIm_t\ԴrΑx>)5v:+n/ٗ89 k֐T8/$r}`[[  !SSzhYy5av~f8QX   8(~`,u~`7MQ[n8YN$5J :fuS*B;^DU=)󨃹m(hήo|P< v։n>.Gs"eMA#aXF$.,˪&#\t fyCnvLWAҵ Ag0d2M瞜cҿS6..mwr@ ( 1)|Xa6]>U#R!t2|u1CUI|Rx.kQ0тH9QPƠ ot?nfBp*B(*Yz,w\`:/2>_52bDpgGwBFJ$=]+D١ΨjҐN<%g)8QTSm!'ϗo&/!?/-#^ ZTy-H \*> 3F=)%<@N7ZyEJajk.Ybpt 9RKbY?>OoM|vH/tRTg.Ntr j,@̙yNc$qwR*}.1^ɾ.84Fcm4!gY8)A+araV䢣{RL%L0}|YN—=eӥ^[U~Pm!!0`W *Ew RR[W C!> ^Q|,ZR*I\:X}7 <q?,kSewo`)J+|r\*B f|nShUnë@`}+}(1>[1O!oxGPV"d0ۭT^-)Wm,+9VE=S/9#І*GlWdVO8qY1PȻ5tO@U[-czIIl/o}np!C ~ t g.[si2eS4~E[fC $9&@ӏ%i5ޗϒ'ۡ:V_S'E@^VC$r!w?~&[d024uF;ߜyeBD\I]ُw  90( er k^`^77| e}&I7NAd6]j9,':ߴb,BڛIPdR4C0r eDŽmYgs-ftu<1INxC)[$Jva(-om1L@ e6kƸ.[yཱུ-Baۋ1I^6!O},f1Jo{x8O?2V҃3֧_tK.aV7$:~FhNx=YeLeg iB߲n>A K.lP Mro~lRfw5Xrف?5]4kAqh` M)qT%4ߜ=;W= RLe|óމU$~_AcmpX'~],{"oF[ʉBTX)f 2u?2 mw$T"y8hXft7q]u&:n 9_zf8g@/hXcq.+}.t4ɱUס)SŁ`7ka0[@DB7wFH5-D־m CZK2%J&/iJ`JF,|(H4ܿxsɌ6:烨ngN=w3JkcIuZCba0N挼PiiP_EwBDz XO K{ ,ώwɤnߎvt}˂az..}HbH;ՑP W"]AhZy"o˷V?'wwYn ո:f'ะϼB˯#A$6WvYsjWKdl:)[+@e K꓈ /~б~ J(H ډ]4Tn\dx+x,pvϭH ;KeHgשD jH`K@(I$$D`Ñ7Cs+NG|V^,rVo{~eZ5ޜY)Wrud_a吢Kg'okCN U:I (%54^l=N/STT`]_ iDrCh۰2 3t<ݜWc#Gt( =OS/czPL܀ۨ1ML]?}2L7Ykvg'{Gߢ3x \a>_]0cJo6Ux7|β *v <޲IFZl/YA@Ra+!ԑ?@8`;jk ukˈn?O㬬 JYzO,iL[AݘL#{5.(0G;3-K<}sh4q4}"(e1D/yw: Э1p$̗/V{ka90 tK_ICA K*Fz V@RNe/Kncy={x$+?v>]MdyяfrJ8E%r.jk߀{;STK ?6qQuo/F A: 9>-yW'&`©I&V8AyZ>ΦU5ǣ·kaOtt\PuױVzxn[V?}/]P, `} GЇR~F< N!"57ViDm̄lh!Rj!UId$@!!nw)}x]paTV?õDk00OHD9Ͼ8` |Gy?Ny 9R|/*/0EO\Oʟ's/K]q 3Ow G|'x^qGY??cG}_ X~,κ 8rTaidFᑜ_j.8f@;q{8oy?W3|^2(o.ۄ D7w%Alml8ёUah=T_NDOδ4gXgT0#1Ыs2Q6m#Znp,rX9Ů,T! ! 
߻x;8ՠ_DJؘh}>E%P0b!`0@!E2r `:PU!b X{@B $qAX|yiEdh({ Ti$t[hr`H>AnHV{H{arq0:26 ]Nt֒X$ЁW>J1f ƨpB&`@; fJz,BL8 *cU# .y=`/\6ڭp]F+pAk Y0fcB)FE ԚFڻsFmH@ّ TFyP`R1*qpH2!A62ws}\ mcgqq|ڃ&:p|  A(M9> Źs ޸t?/ :W-0fbW)ꤥa )\Ϫ3VDe/uc<N|y4z=.l;!h lYӏT#>__ Ҳw=t."#tuu HZ8򺟓闭luv%U|mjL+s(l"SYz S2fRY_AęQP"PI#i;z%Qcרס i\Miky3A[^|Liǁh^vhݸK;!NYo].vIxtz[߅khUh0](ZJ ChH@ aJ y^BYc:"(lH+#H+u=A3p?58lEA|UUUUz5Ä7c*slPQXPt×$h0(c0cu:sm}Sû 5/'i,qo_B2[KaRǥCX1UDlb*3a;<<.:̈́ ǴacT%kG\2 D RhĠH"fFUqo0C\aU=:VLﰁԊAќ\"8Nk Lc_ ҍx|+n]Tseqaip!Kok|Yb|rӫ9`5F/u0SlJӜho7a_}J0{f(򗩞8oה>~=W tg@`Q E:dVÑA$)FTz0^̙1ᾚ3һo ׄugzc}vݸ bp@ l!`Y TqDaG1y!LWa6Y#-&kPb&+P剏9<993W uU:\wܲ~'E\B+0^Z$udž|krVQ6 Aƀ̳ uˌ0(BI Uc.Έ6eJK!ӳ_`r"*qufN' LN3,)L F<0VLN]pNZZn0G ӯ25MM, ٹɑZO]g{`S|:ލFy4p!Y8c&aKaq94Oɩ< auX `1<%[cO6zG𾓆IU=%1'S166mK4"2\ }js!^A{xTEPV"lL# S.sfvl6Ļn1_} M_~k~ZZ<.4#F51j3QCBDv$p х-_G"!P_gdחLr1[cqQ|OMr|T'SXi[NiGl6~qs8j 8W{s3^}|a+\ CoT?[Y˗3@laB>Se  wcBZ |a^$wkPHmy+Ѥz]J5ha3;أ($fE>1Q2ȁN"`B*K^V>s3, }~ؾZb_&sV4"b7Eu$o8)$oyb5\4(NxfS-s>l^."Yzz:n?>Os_IJ7ez"#SfuCCD2%jc<䦋hfnŦMwJ0vǪl[BFZMMKаN(epxB0s,F'h5[Zi0Q02,4rf 0&z9=%}^| .|1cY#A wC$Lʵ\[n{}- Xtxƃ櫕%4,oRz yt%]앚?,Tg~ZΞ?`kB~p {R9ⴇ+`Ɖ8I($,FOT!$4E *xlg/ Zy- J(WSjfм1-/ Q`#7 1&.>@B䥥o^65j USK`K+ޟ'_JtO3es.޲+ӭL0}hKE'ͅ~=:hr^*1G%52P1=R\xFҢ6*u9@2n@>ԳC2_H/KOv7~`z>ڷdVK5ds[L? c\4jv|~bW'װUCyv8ȋ1\3쯓w׹!uT z:P梅Ye#B@ؾe9,a]gS<yigx7{7{͇s<0ۊD1g ?/E/@k/)1,@ߋF`ͯo&5>Q0 Kv]7Kд[^Jg=a/+qHzPm`s l,cyeY썃{φRBBMECP}ޤw|hB8ru^j=:޺> k9KḠ9x(Z0$l[0vS*e(1H}˧uf!;8+9_N~ + =ho^f AGmp]mޢ/A$ /~+:;j=gZ'+6X1iNݦ/5| ?g Og^,&;)//0 $_^ N.{X/GzbN^΅8Z̳Gk}?4;%06vw r 4Խ̫CȀ4|?o?$%j]kZ0>9.x$bG00! oswAH;рc> j>Z_lWo%p]c3:2hkj^7)B%}Lq(dyr<!ïj&os-·ip|;ϗQmq|lm$=&o5oN>w{fկXmv\n9G꟏j]N0lm8=uS6.$h62cYiFvVG::]AYBEVt`+OӮ`V|bmMuzDe:z0!>Q wh4ww|I?`F'0>JKP'űI HQ0lf\I`17$ HHN&'8HLQOX+be4׻h6 b #@F']fN搨x(PR-02 4XJW;V͹IiD*V8rK>K/7ڰ5@d 8zF#Il 7>#Jm >}Y=M ϫV_}E^çts.apCߏfb_e;VG)ŹEGZ([Qhq8 'R@l1 &6:[G#{{ Vh}D*u34Pd3 a1=GK8̱yF m14ƲQq <@P09yp~Aΰn~,C=(=NPTuDHMs{e{÷/z-&'z#7 ϧ/ ck =H;o;~ĭ/ ,s׫a{i9YYw>LZh(X|GHס6a8&rDj8y|s7ǎ?-xJQV<G ]!\.[pa8M<θ_)pXqk|S pߙέ%Ua "t2\`]֐/9/>{Ї `ϯSjXdR]{P֘ykNMmP]歡4+; { g bTZ^?l G?,k/`-9Lfl* 1@ sp'qJe{5<|d)]:\;9=nx^h/&Ԭ_DTIqy,cfEx Zԯ[˾/j@"+wäv͜M) se'oץU1ˤu0(e W?IPܔn0_=I 7PRHo(G~Z9vhgOd؋=gs17ܱۍ^)4A&vny r6] i:T'x_7ݨGxGK-Slb?~Ȍ(7: 3*XI H&w,'x_zJUg( &}u1_ 4U>A/G!5XSw67J<`K1ydχ'ul^L6 > 6y| UUO4Ny)]nΨO[X40.(((%  /cu~+޺Kaj;~7_mo}׶5!_b2x, 8a~80 k_n| ^$VޥbC,G1AS3s=o$T1sS:iU>x`Ǖv? k.=u݉PRO{O.N(W P"Гw0ecB D*m~`7d^z@'=gNp@cf7&61zpS"c.1+Z2&Mg˦5:,κ:i%zZUI$)N(KTPRlHż{W0؞^ CJ~`}POsU\3Tx<`ȆS)s̴@ ^Ӫ`ս䕶R&VҗS#6E$g0}D2>`39 @CXTϰ6U+VC!2[Ԉ`?D7hoМ v"tȎz8F~vjE*W̽K>`߈ya9L01e I. DÜ]rM+Lۉ΁-Lq. 14pJiلh*ZIJ\ͷ'YNtO](syyȌLRbkm'UF!I:})*.:Te /!’x %]bfҏ|ZLr 3;ynehKQs2\/JDeIv aW(IϛwA.ïYe~/|wme.l u)yEseWh4O-JR!NJ{ȕY+%ꏂOS(kpޝ;u]Yb[S:4ۡ߭#)ǻB^ pa}\}x-Wp<3Cq aH1!? i=;ynݿ'~O_6/;2Q@<sp-fQ{wG}85 Ȱ}6BD@HDaQnKh*#6z- 2= O@P{]b>?iٴ 2(Q*؄3ϔ!V +(})Q@;"ӟ kc٢sr LrsvO.zw!!Nw-}\nfx嶅{"1sL@C<\uuSi6~u''RϚܰS~eW & uqʈ:By1Bq+-daH <,% D/ǻND@ i̬IcѶscvj@[-? ^DơFe# <>}(b*W^)Z /E>vo5?;BE:Ky"Se[u? 
؇uH#P^M[CzXeV@'sbAĥfr;qqqqq>qzaz~|󝟀P[L^gu 3Tsh'F"@,1&%DO'2wNyʫ7Y\BacX~yqd]?VQ<͞Jnd?W˷V5iJ?8;b#+/w'V2B}3y鰽$zf6P# I=$ןs4q5 0Gwr @w%<;牎l^H+>Rx<]'=gPbX Xa8\9^.Y daQl]l0/UV-faC+9\UT{MŴz r9sqWhPg`)P5(  P=~Jpts@<(FPiPub IQ:,m+?l*G3|*!*JcSC;=Jz|}[Ff4Dẏr~cɇ_t\3"/{nO\^Q :p{"Q%THּ_n8\0u80>hväye~o2F gm(L$Gc E LHeg~}}}n2jW{]LtWC?ѻ5ލ{c_CBd@U PE[5W H䷸a\jg4eΓ`muvo!Öq=OhX:9T\p&xM``m3rr6ň<*hMxM]k,%v6fѧ`Zjp[HB1`8?G=_| M79!2E^e:kuZ$7/W7򖭷Ho904S3}H2oN}4@" ظ7p4% .XA氊!};̿nzuŘl`fʉ oH X PyD}[9Yη7 PT`:|"ІR.`sc/*߻2(~6w&\PY܏L2XUzUŧu_A s)l~i'YzAh m BVuT )(Ƴ>1\G$/%j=3Up.+":4Y'~{AET6HNRy}  }g${>8<1Gq'xecb^x?ΧKߏsS1D'!g=Z*bw?'>Z%:k]/C(S@0=·oyėe@G*{ (4Az Xb#ߠhQ~-AtA۷{rOMf!̂ *ʰo8){?yUqx͎D2nvwDzq`#23 ?^7w{H|[Sߑ$E$V|Byfg~ml )@~A 5m|oY|POhZmX =Q slEW ҉/$m®4E]@B+ A8ro'BR ֜6HaΌSΩ!;+&3@hԖ@OS8}\#&zcGy1NBk "=A8cT|-Qĺ, k1gIqȔ&(El<A)j2`Pr^P~n2̈́l 2GIGG}gQ>v"|;#]>H K#İeᦔ` F)vs&o V(?eCW aE&(a?zxB DYX\=[JMu  *L*>ǯȩ+ފtNN.䊕No+ttl2жE9isJBpl<0Oj`I_#g$C-4xm#N'cKaAmk7J~ג> Ǟg8DxPT|7T0v7S51H9.F p]o+ҖYҒ%~̷%D*w? l~0wcw 8]OgaSb-ZK8\L`h4d9( p(aЈ & sl?qv!rH"?']{g]!-p(pV`K^'x§a @ @D ԀIA 6+εb^%9ĸ^\cA7M~T@$X9eFȱ;pc>P/^|Ͼp _ IHj2 &OdFצ$@ÕHnkv0' ˑ m.д >zvRv@[  oA\X^cUp (K!<;Cdzn@fWj*k;@ ( a>SDgV:!=K6X UM<6t?Nv?ʯ`:hzIF3:ٗ~ ДlH bEO/ގ? }=:?#:ttND-6 U5܈ `U.¸(X/k<G|&Oğ*a+ v?~eC¯ 12w&Z~UsA|d웓8Fpt=V,xҽ10x}oNΔ,Z#&h{||H |=hWyC} OSO,r՚J{AÎx5, =sHrl֋mm)*> _,wt2^Pm+V<xo؄m&D \vT~R 3bp|8Ư/Mo?L` 䳙>(/Aq{ʩK^xJm.>o(8%@H_`|yHD1P0@9>!.M2NbYܡ`0UgD,!8NTfuŁ찦#b ʀM#뼴Cr+ XD44 횴O[ 3f9Ij)&Dz*t2>n|>ۍPn ͇mwxiΪwTpi-]u;B_n]2N;*WxCdYo3^|&`90exvfzvr_dt$@bV?9t9_OO\L29<7lx߾jb܅Y9a2bl$7ՂB!B 1{5zL2Čm5yoZm=W&"D:zc4tj%9[gVsO$o0g[n'Qq{cYt~ QQ' xSH.}˾^cMn*+](k, Yo5ema7N_Qknb9qlj]{_˦& y;9OjBzʟaM1I1a}[ޘKi(oɨd:i K̥idVONտcаV"P7S@t4},t '@PA eC!LGTuvyWcijQJbR㾌 ⴁp+bw֔ rMwÚR/q(>"BY97 ϵއ >!e`+dCvcyhbD ' _NhubgH{ B\]UCBH@h~k%ϛ> {5XTA$" yk.1l-~h7E! ';ph- %.x+3?w6Y;b렮iaGS'zd't l &#{7J>S\NvF1$@Z'GO򧊟e ?gC5Daqp仹\ih v9|9Vwȵ\pGzW)' W~FJ-]Ox7HR@GW|̓ŸLg=WP"7a9y=־rܚPłꛈcLzf5#-p56=n~L@ BBITԚ3opy (G`+e5Օ!8AiyP^ Ur֫ss@ <՝Ɛ>Z7O#XGuI 0vOT`fnh"4Vc@ņ3K6s#uKseGp(o8~N^n!d}d*~, {U0-ut*dOAr?g=3(nd3 6Maӌ%4@P@PX* KK/{9Φ\Fjp]fJUnPO)A̋q!v]>LR̎$xەVy~4[Hy +-K نeПYOqvK[ 3y,W`b (ouYfs"E b.niIf%Ɍj:JNM ]nˡb.;B%߹6$%%Gws8H]@Eԩd0hӻ/yflSAȘH(A>}Kt)}RULk;=眹c'ٚyHOe:ݱ 1-R*>[uqtpw.W?)Sc @P!ad@Z~1\w,O]L~.\ v놎e?mr= tdõ&Ǒ;Gr7<`0K gC1m0kCp.;(_tIo0D3ra,]5TY]q!z?J|#f~N#hU'{E= u\+dk=iꆆϒi#_ngx|߷z tٝv*_)(-ۏb6SYHH|%IWJ|[oAoEEo370 EۺDC~4 ruECݛXѧ1e a#w;wՔ_75"n]ų`ʩt~1Z޵`3}8w?"ty,y3:IKG/Z.{ araJdR8sZջpZ79/C~ F@fʈ ]ncr#Y(Ő!_N%}&K)hkr$K#ʻ(Y( s\ˀ=ny B{ۈꆿvGuA[#Lݮ|yF}9-z&69=DR~_Od+G@ F`B0hn?ηmgZJY&^3$'Bh]Ov80l^^p^&s ~>WR-@L1eEHA5vr}e&f|s@ '++&8)yFͼy pZjV%A2m{/zωt۱QqM3b0|Fy:>NK]_Lᤜwif R ٜX=^󧄵QmߛS֖z]zЋއ.:A+K(X\4@A$`<5Aҟ&2vsNR~b[hxBzu@u=JK*"x5ژjej͇,ǵ;S5LdqQAzgQg0Suwl:~@Al]=}Q|"nӺs\p%ЧjNRY%T'9*̆u#+sD`6Tcwof2y$V_hkySoL E% ?u(l`6wm/*$?jvjCebo{] NCsٶX߲wHAm_'/S{ŶM9 xAk<ć(x`삠]7d7Y /u_0M`Л~'w __~b EC|8==a v (>e7 | @w̬s5Ùx3 %W iJA30a>N_fMU;}.M$(h$}!  ydu[$IUc˱$TsΕ-L Jeb Ar <:~SvA#UDNDCN,AUOqKWkO/g& mgB9xo,,,,,K]C)@l[7|^,Oc]w]Է<.h):z= S8̭nO՗-B3d.tL? ^OpoIoךm;5tr@6B6ړ__s"<)OH^hB7ҩy '@=Tq=hlk.5T*z`6ᵗ gG*)[ۿ! 
_.}tֳMcd6Tt'7 +T_rO1|^'_k`u\A2gɽ:Qk׻ Б@pAZ oTQv󕠬s f% x } IFo˙X/lC3 D]18(rZkX*WhR燭" NW} $W*P)!{?fv^c;ޗ^R\oS Oɀmx p(g1lz3"XO ڔ NĕHD UY*7@>@!@ +i_u0 ^,1(TAm< X4"!G 'kA,pcw#+c,붟r{n黪Ҭtܰ"IZ%HWSb' nP]ִNpD,FAQF rowY&&n|ä: uv,.'_RtݠOPppT3]C_{י@M`opXJ)ɡ| WE`ջAZA5`{%eSO[q^o/$M_E3yIF^xC v5t=_LP{OkYձR\?e~ Q]^n) J*SJjU9jr~cܕMdz(f㷌?7à>q3=9*[|p I2n  ~kL5IyEjaB_}^lܭA05~6`@j1\$e4xQ1uuC\emUc4g.\J߿ן -;Pmw4\kBb;BGч'fd[ HG5yYθɱemqCc@mkdh.V{5; n0 2h( +O{-ڭ3ef2>ؿPROU,Q "IdY&ƊFL @ώ87"4b|ކ[@z|fa;{ƞg] ZNoL7ܿw2tchsX;s߹wJix}\\5\^?;]OEEFKfw)۴Lq(YCnv4ǧ 1B yڴO)5MQbSghڨW3RN,y7O^~rFٛә?QFVUQLJƬVQul8AE~9gմMrFQnaG)R1!M4F1to{ y OȊdh"HP@|Q_Eq! +|IJ@%"aE-/_"v7+p-u^R/=<WKU;p0I3={bY/)<>޳!t׳/ ga B+ekk.־[5mЉxuwVRX q1s{yXyE+$d$tDH񺭯*4ڃ%ͯj*Hw`˃1=Sͦ7wtwQЏ rgJӴTX70,ڡeKXqJB6Pz[%p4{o%|r.aӓ~jKo ~`DzD }KvOohX6_fg8Cz?:Mqm?0V;NzǍ.&cO§4 +:- D~oEY_C<ԝ @J6ucR ^t|`F@|بȨ-HbkF/9(sQƥL򩸌:N*_߀l&]dV==g)X]x?,*F<F pY52+Z^4 "v{~i]gc"~$H;z2w̪7O7C|`~t-|"R]d2 DԅT@a!4<$$x*| )`|Fy Ѧ,x U0L}u03`$rtHo=@ CoR󮫵e>.NܸmޡT[OoSrL ܨ"?OW0h:]zxa$X|Fg6\r]}~\z|٩w6kni5{ avx3#QUwOjzae %1φ 1ë^xDZzv<ҥJ^1)m5#g\åtC,hAe1@"exVRܔs0hжC]1VLC3wr? عKAM.yK2WWNF  E*EkRCBի84HE"M0: Y|& -Z:?d@MVV^7WӦ n{#-m|yρo&8bpd%W&$fl`PezTApP JSjW(Ui7xyߦy'XuV37^~Oϛr+jNZ.=.{uf`^qqkh|*{sװZj]{Zq7篿&-DŽ g WxyIyUi#lxhSw:qiZ3aLg_U;W;cjtbfA;c+SCф?ˑ| ;oyK7 .uen*Vpyw 'Vڀbgd_w)p$9Q|uD=>z?e"{QE}]O} Elw+vGH(R7TVLdK:W2mv!>7I&q܋@:_dÄlziO{=Φ?COٟƳ`hy/w.$Y]W_B ]{5ޟeXz:7kk%+ XƫZMM 9a HkV .M[CgwV79;uUMYIwnlw5K71~̀s _v[~V0)IӜq.@f .8x?8n]3&g3gBͣP2ajiJb{3CU{޼G[d( ?X#7rK[W{}\|G?߳E(?ͪZp4V Us>ĝjݦٱˡQ ^gR:o!wı0ŷEknSH)Sгg DR2*_"|Y_sy}r1U&-?Qyw MPpܿz~9љ &/qnQ>oڭ`͵Ҹf5slM]_sph/|Y OE>F*މ_ц1}ٍPH]S1&[֪u3qr96ن{rx_KZxbXQtB[Cgwsm{*ol< JJ׻]LK5""*AQ xw]7)(KM0D85ZKFo7ͫ: EO-S.-f3)ԃqjM>3U@*8]]DG<} /Su5,աR5'ֱUT9u&%DfB;uYUI=Wma<4[zӷ-wZMUɼVRg~4rԣ~,^}^Cw׽kQt}-tfѾB_ǿV$WXRvl/o_\oK/Q2+}̚\OX<#iE+9{*=_}n]P5ҟ6KQЁZBGzmH,U t>ٌ @nޖR$=^t~{Uu 7cm@\=kusb?*C{x o# mZFH"&s1;Z޾|l Wէ3!SO؃-?MȤ|k7Jp[ PfX>9xN 0Kw%:&ga)4~V 8'T./깶)yY.wZ&kq۠ae1l\3YK;0%L;|_~%uYAQ3 c7=HvTM<wc|[9\DWkNGSim(]A%!Ŧn WOQ2~O@Fan]ѹw|Fv#@ ?Y,da #I!-tfnW5В5Z89I5Cj%W;[COαbNn{XYKWlIlp)ڭN]/[5@@%I[:ONVJo5ly!G$*eM>M/FzibLcp9"]vplD4nG ~}_ti:]'4we1n&ofx_K]л?an^9w1F6s$ m#vPw}ͶEQR_wn2mӷXYOrGZ?#!-KWyu'cV^ܧG$xA;G5Ьvy99GH};bڟ&43i2;JnFF }iаđӶAB:P4(PQj8zCt'}᩾s}VH.5p6GKyK`X}/| oLjW97Xu yS|:\]͵Hx NbqҲ4.8cr<~;*Gwmk1so}_eRR$.Y'1MN_d{5Ln&3Y+={&c w/ב&{(/K,,X[]S ?Fkq$1k|=];kƨd3VXOsdazI#.m|81C!'3Nٱ'Wi 丮d(h H8kҒfL:,I;RUPQ/.'. z &db煟x{- 1>a>VoqͿ}Ǽe;R^&r(.+Soe2z-{B߆%ej1;[,pн_Sͬ @U8SF:^Oy&i;UcFhVZRG2| xGUUi3)bw]X#էaQɩ:ԳYEK w S.MTȤ~՝n~Jtuy81yd"cQX;쑡b[iڽLcf2 л'RJhdDEuOSĞ òt5meã,kDȰp?M@nOp 3B6~rkg^h4~JQaZ{`۫stOq\i@bFSYjt|_fJbL z)q\NWU)5stX*$\(}-*}ϓBg8Ufmmd,/M!O*_x"sa;gduٰʩ_ ![.QxZ$^t"F:uR䕹 f 9rлz&W:Kț֧\N0[Pg$2}Φ}Ye*feչKԓ 7&ӚmGbkGB>.۔YvY}gO}S-Bݕ`ZsX}JzbjM$$:}RgrW•  bX(?M/ȵNa;.구~ׇCMdlFR}'t5TM{ aqbخhH7?]eOʼj-7P܂PZGjI;g=]XNM}oǛM{ .Q{4+I.ʊ_ ǸԧG?|0 亷w^+)}Qߨ>o3rjHr {l$_HԉHٟަri .~?_ ^+7'N[Emq9u~R2EI*ީ3qQȳ*kl3fܢmsѩ̣j.mޏ=q~]lr> [aGE?85E=i %m'&juԇ^y_6H5_i"w1w]G6؂2O7T>\Oz>>IP@QvҬӫ%. 
z..#yd F<5ots]:=[& y|:Ɗ&SpNëD'L}tװE: FbԤ''Q(h&{GU+z򵜓()~^#1bC}1_c6*rXL"C'>1۟!rK||&FL-8mBMH$]W /IkN)Xk̖r.<_; 1q[ʏT %Gdɫf!ߚAq.eBIw xxLS_GHI,X /W[n޽\LLNAeUl^k6˸`%og̹t_l0768LJ(}3uC̭;Js6G㵂 O3R\6e<_%{HMχO|L3NdV'FCZfS[!j\s(xϐ@(W'7s/ٯU{;ȬYbvp'FmV}I+$x^zbcb>ѐG=NzK^t1>8=8[>sEeoZ܊3g:~b=}w4=iޣ܏[` <ċGpQ 1L񌡭:7#ta|lP7 1n-.>&Of\\b3&[s;bOշGtt;"~n:v]J:sY-_Ĉ]%Z<oBYՊWftY Uqz{w2uWEɑ!%#ZoA?Jo=ϱX֊}GVߊ4xTf<EO<2[rR:O&|5MTq2W-%ܨ/]\MfN&rjAekvæl@rfOX~ |";~ V#ɐDIGүS^Gx[~?g̘R:˱)SsO7se9ϊ~|wM:b{$]yHTTelbF,[ntIVF)'jd,xɍt|v ~E)ݼ*{zHXU1 `cSTO pfuyq_/3Ǡ4||`RFHDI±+`Qzq^8I~8+٢iЂ槿׾]ɖ@W A@䠑RcG "vEP*T-TV>g~Ӛ*&yɃ`U Xo m c9dm㖰UrJOcDK/:|IB3\Lo$/˻4*2%,EgL'fz_1x9wLAaȡfGpL~:ln/ pn|U~vU+bޅfLdcx~H{ֵoJyK~vP]ҧ%f\5Y6Z%~꥾Vs$`_6o7=_=ZvNRp?S?BCg>+?6"J;`38GUz_i-KN ;G'=BUiASm1kl :&+8rQ&%݋QFSÅ#j+Ermd"va-m Ǎ=w.īw[bl ]>ޞULgV*]׬xmFkَgmxlU:!HI.p?a|#KvhN2HYn2D20F`=~&\֠}Qx]nL3TD::02ݱ$zE]sO|֝9؟hKؕs 78X^M<+}гȟ_CC^vs ;jOhN]ZM'~xh g'9p-3_Y׵@4%CAsv{FzU3zGВ79ZLROxi洯zưNj̿}K?U5z z7_aUaU˖PwVlǬ$m}v=ڨy %ui&8\x}ݳ+ALr&<ߖ0J(5٫l1Su;.Ŷ N.>%6Z%ddQۖ;ӡHokj'><;2'XW!^ `XIȾ:Yf֩)=ͿK=1]j-D:/@ur7W ̘ 8? _ï;+?D|i*<bz^o:}C"*~}OԵʲ*^Q7L,s~Oԗ9gU=%Mzry K\2 Ն,wK_Ts[q(){p\7"Xjt]ΆSrpG2tgYww2yS -{9L,/Ԓ3h%½Xv'ф=S9[96kl0cv6mP}jgA?-YhS] >RJ}\SoMRT0^~ʾ)P罽>:P0y;Ou,hV&Z8쯨P^!^(͘>~B6xBZUUM3k1IO?/+4J-&B#k5-ÿT$^kYK Ζ¿:9'xCoVBx܏xyþ~>u܋_u[kdwp\U_#6lVvxij~/ lh3MP״°[wn;9LۊsҩZˬGTFEB957ĒƁڥ+ wЗP_;"v{RN>GOF G@W ÜkWX}@8|1'1_'U@T;nI!m1OڞjOɧ)ۗܤޞ 33ƈƞVw㏜铸f`H14U6itNJr}|%k:KЇApKD`izU!GN1e w>FN&Q#}䧜B#v fe$,"/a An`|lZE3g5v\))x[0'wTy"J_rCOg0}>Ǟs*}}7"3:fM(%ԤFYHB\Q]9j%>p +PJ q=pڱ[(ShP\SO|dTIǩiyI.@O{+2ՒP-/2˟J20(zdbkVʴӛ.tQA梬e"AebNW̌髖??,^wvCĤ߹1Fjԗ[r!=LLN7;w; v= K7 %#5oFEѽUjC1s5?[ vL.U~1;%3 +}%t,x]6`ڕ&i="iKT/ªs1OgDn9b8r~4>Ajxkf$YRg~iڲ2Ҝa:d }m8;N %[׽y8d(5q6ޥ1 -Uzyp&ZH˅:3{sKCԥ@?WDx ^Ȳ>nDOȫXVުk,EF PNFX7܍:,X1<q|y;%&P[v2apm3Zt9qV27X|EU3>WD8 eayjܜJ,ۻonkjb骹*uT\'wu_~qvǘzu.,adFI +'=7DJOm(O3wLgZ7;:c -e;Z>o@x@)gV:2ޠ{n< n.㙮rXW(A'N}.1&Vn") Y!l6f* ߿ڋiwg.÷PnR*lqDJX4ߨ.ciݳ[yb ~ jN1uu[wI:>9$|ye*ehd}_FV"w]w9Q3+7\u5Z-Ce5g/=eB# GjcgŸƽIjVEfjYg)lYo)9&=/-4QHZkZ?Cw,5Zm;l_w\Eu1_9mf3FozYS E= C@.#kV( γ?w[؎zPp<XUqyTfuC-DžF>Beu{)=>"Aº2qUxr8:PaQ0Njs|ѝb~8:V+|+aʆx}Oi&v)aA0P-i= ʰp;[X'"YGuv i'dWyRq&ڱ?^ ǀKXWuf^ݢH[$|u w<}hop5]o *_eGWA $v<>α W~Ar2Ĭ:M5 ؾsa+K.][NVg4g{Aӯe H}N'tߗ, aOC\D1Xwf̉hXt=O5ڢRbE3YwϠa_2}9O[jv hym$cpw##$噚sx / ^j(uG4Pwe3A~ K-otT=[4ܛxrqe?6!C&䤑heRF? ѹ1ɅbKN@luUx V/A}ǵ E:ZXq[ >E_!lsQJI0xDcvvR ̼V߰X=mU] ]{睵*pY9:8\lw%&n(o1`(-kh `_T!=|9xa/f.JCy=ib+ٙA $>@[@HɮuR=BpjdǑyc;m<[k`\6?,rj`2;SέJ3 +m[#\$^Yttqek[?uX{o007nY3|,g% 5z쪞W2.m`2Qޥ{NBFcIO׷;XAd{<@Lt2u_E.o* cRQ[}/lkeo 2pW^ޗ.cy1̸98EHzA5Blg po*"xodM̵u8 ɍT-RF'O$sG[R >QnwٕGLHRDSM؜τ'"EX촴yyls|,mL/w#pߋX58O ~WI8j"oi'iۛqwRQfj#i&{3{Yԕ敏m*-^|;7PفV]ga8,Hg!O~jh[2ӛF < Nz/۫|sTVJ0sQ;`몎Z[BP'g3{ 4DrUIIn%DDOy tN]Am|u = *vh@"8P@/q:V+i+ ¾{̑х-Ց3T~ڐcVuQlT#-kN-fCÅU,1w3~z˒2em3-\: HQV8oszN+dpd>>]Zz>u*N\O;bthͬ^H}738 %}t4t2#Bf)O9GM hoU\K7yLTUē~ +=bT;({M5Oر"8,`LU4?wE-:VAi#G~|p M 5elP@E!D) eP黔"t9|!N= ֩:vRķ0qX,YcunOyE\pդƿ1C՜z b|Ba8i?{vۯ_Qԏ~@-B(rHXB KoWyEswW$ B]Wu3_UjPuvo98}/m[\K5QjRف/9U"V'OwLq}(/M&0(" (&Qy] \ɜ_eAS[X)6i[Hk:~='kܒ߫aͶeo򞤽Ym[ t}h뀣<{3$9[i LPI:P 't*KcM 1;]ǪJ'>VCrW%)NOjSsYKRyhpY˰Q@Cg8^6R0I ~wCB 'oo*QҀA \knΑpZGmKDd 3lZ3Ysq*ƭ{Y7J@q;Ģn}VY@\`!(%Dp6K~~lywx8 F͔QERXTG(dc|/>qT'yV?__ƈ!ᬰ"7>[RFHH2$!# u忷޷OK_$=QT\)9.{(ϲD=)b,\ld!r9kҽV]$b^TJXZ5=Kf*sƻVߺ[}C>t/4>'Swm|LnHHB2'OAV'3%`vaS׬@YΪWg2fCs@qZEbȕkd@H !iY_}!Fv{^D@*lalȔQt8? snk"ǽ*PrdeC9A&YdyOKj$4kw^.KoBSYdXFH?y\KmWZ*ә? tbu<Bpw(7ܝ~6;z 'ߙ٨VXUŸ]ȾgܨxخLP|@ M%5@Gg7qNB#u鞓@l{bWKo-./tR'SU:ߊ[D}tVش^ ^ fTu)>~P%e.O<-dRۙݶ_>GM+s`_-TeO 00dAW7O8aO681/OEˀ|N[~}UYg >t:p,* \őAf@;a_gѬxI)!_= <\sŶlV =sH.u1 )%I~c>Mk}/ش.ݚ^-*ɛ$qK旍3TӬ`W1|1\-K:ڌg)Q;K_'^n}H^qkM0\O[j_$?zS8}[wYkJHnVdwɍƗ%[p. 
;t0ӗP!No}"*O?9w頰orحG ٖK#\gm;GkeW]RKv_.CT5tl&%hvB \rya }0y y$\/r+: SJt(x<ׇkR#`g]vəDs>ѭWf?wi5n_?E~A R1*I O"lZ8#R;ɞs+)^ӖxXQW]Vm/!΢vZW^2vǛﺚ;jP飵ÞLFsyeu"QrOӮT_5yϊ_$Gp>Ec".Ér7jxR@ϸ@kA;#v5e;Mvbm7(Ѡqc>5nT& ᬤ GWۀh@EUT2xggQ )hK D@LDo9+1o*(,QMY; ȷ2J4+pD-BkBxwu`{i ?5{,B>/3̅FS,oRX,T|֘9 mS-w9P{(#{+Ayjvjt&7H78YP G%L) ʧͿ7Wxyt%;FMv7@p[P_򎼬R]+D3HN!PS1u Fy\8usB]#ġp9+sz*^ P3Jj>ŷo;mЀ IfkTm~BZ5 ~:._m]=_]~'@Dy۩&@GfI!*z wlg42a^v)Ɣhn BWV}c4t\ubX5疶0LnۄЂ?1fKja8Ѓ0`):4K: =BmgK/ujcvPrA kܦ~)|9"Bb!ڧީUGT@5%mwa瑪F5ؗx˲D2۞-{"m9ܔ* D DWXh3kafp*x-{A߫ztWHPDDJ9[<}N4Fb3op5)#m7;O>݄ȹRm*t$xDx0 !p =e !GMX# vX! }GX( wreUA ?o,q0/na9ܶ +'nㄔz+/r}^ZZ  ) *+*F[*4zV 힊dCC<%+SMտ;:iIz%Ws,:%yyUd|yǾk* Ћru_qFmrAM?ɉmS{-yMc|[uh!% ym1ք+nR!vkPٕ Ek^H t^z5҄M8 lyT@lQ~1WJ)z ߄z;r}L\YS{> ֚rEo5]M]aI|c7ct\ q 1bi1gԃb)SCaN{$&:k6ڨL{Mbp-9#qD|]{wl>ܦ9U{u+D6ꪘ}/|#,$uԁD]k.~I*zHYnUس~auC9" ܿۯ7}wƩ+iI46 O<{]/2nu@,so?83;(^֐>jq χ~WD;#MMoCk)F`Zg_pl:̹#PME4Z`=Os#tqJx=+{-, OPJCD!i2kewM-iբ{3~ۈwvkmK-}C2ɏ}i^n9ore99@ `L9L#s@.fKYA6p>s5~0H:؏^I4~-,"xN4O8,P UвiCޥOٽ_l{s\/⒗SǝgF@}6{[?W Cz&o#GM$^ V}J+e1&b>L(ܟsyM`aKf=/_*B?M/ZPKYVf~'2m ҳMxjeZcA7?C^P=E$'moO^ve}WxZ^A=!T_oʎɲ>+z`YA0A u0ub.= ʉԾTAx @ !@NmvQM$`2BHI i N !lnz2l$z@g%4y:UI'd[NNn'`5|ͼz-1\e>T:G3I`ORNd8p ċ #߽ޭspOd:D$UY*pzjg9IhoV΀^ϔ,s*B"|ZK^WT|OE$S%HQw(G5j7F!3gEyD(C lA=}9PfhyO&gnCy" 7e5^ufd~r|MhNH{}Ӗ=!UVXR#i(첿)c/\_1*iTO|!dr8rLZT._s<?ANvBolhl'_zS`R_n4#'ޒ: "J'jMjllyrߣQkQzXUj~1` ?|&ʥPX:l:_9G'oJIs?s[6ur#Y R:S }ꁐAio|*KƿJU]iJ?' hP@-D7Rq뇷)\uҲg)wkKmVl6G i_+ XX}t.u (%3 eUͣ"X [|`6i!z`;,|y}9-fLU?;f@5U9Xɦ|F-FLnWZl8ɇ­HN}65?T+9$77_wKbj!zɡXuupۘJpQ0j* kZ2>Yn̸? X~P3!gT`1e rT} mI!8_d dg`M}YF' e - L.{JhV>X\ lGoͣSC$VpxZ}9ð=Zۈ5l.qLNM7q5^]-?? յ-3óNx J}D  eEH1z۸ jO&v1VBud>|qЂVb/UPE<i:-99 #w~GsՍnSIHk1불KwS=SW` s9̳^Ѧ''ﹺj4/BR9u;YNfޞ$[Wq9PͶ-XO[Ve{P(%kL؏rKaͿ+W1(HTp3 R uO~ p6 <$Bݏ %4&a-XFkw' 杛^xi*( z 3+xLpmt!R=Squ8Jp8LM ![>U\Z ; p.?6]%*>f8v#0wdZ]dRIn~@go-tiC }Hsr=c,Got8=i÷'^ԠUhxlF˯_{iy;rpcOceƃ:fg͡Ԙy7AqPN +Y]Mq v%llޅeOzIc޿!wVk.hlfi&O c]U hFl;ͺnvTŅWSvqϋ}u;f^)$y4I#_/ uwK~+, 627R|ļ;:4I焸xOq1?|-tkJ>l8S  / Y$+LRn%*m7SERӦ)QE-A5@ٝHNrh*cR=u:DC?cO a8 J OuhWl*>r6-(Wމ]\9MI[DcEɢj{ev$+_/ϻ_ԁ4ed5k\Z /kX!Zay_7ڒё\uSA6vj?Y΅FB_ EJvw/T̠+_9cZeMWKdnIr:TxPXwW ׏gSP@x4{6N ~P@0F3ϸ /-&|?m(`k":Qxs198}kMmlDmVUN2?&utƍ;Qڪ3(ꢾAZK<lryŇ^Q{>~>f֨zRPy;D/*o<߹Kiܴ [|.˅o/|L5e'}@ 4o 3J=uC!%͍ k[b+m3>z^2,R0RZn쯅|HT<_$w;م:"|} }.1D " H^])?MK$ëe@ ͘VjQ\p^58xgu*1]ODS&a A0=ߘRSƒɹ+_fӊm?$i2c-q’K v/KYj/IX:0=ÂfK{I[W{p7g*&OZPsN}~gyt9ϗΜ-\`1j30Z>99Ƈ}ekݯ>_gx@B]d` YL?߃ v/fh#`G5'~hƓ@^ jWO}0 ZPWѱԐ:D#ɕq -><=oJ2..x\*4Ves>6$j7mo7 ϶G%>Mez71OENsEF:`"R 8x&`9wx QƧ%.0+5$x%6??񯲽as/?_-j'LE7ՁzuT=xȷgZW乡eBxCHSObمV ֜@bi)>׳5PrmWlbSD \/\cٛ꤃y۟8y˅ Ru9;=_z? >BlAh+ !n :P_c\RKxM)W`e+w0 _ :׆i[1\c~d[K_wQ_,GY,5A҂wOλt}p[U4Qp:1G_$4^p@w-e3Gǟx|3ԡrv+T gSlYvAA l6dAa߭?Ϭ4_4;uHHnT֚颉;WSw4l0EaNEUUEnd އc``-NJZv2*p)W<"a`zg נHߒ3Ec#e}'Zǘ'99TBPU}yS_o?Ov쾂P,(p8ݭwMћe } #9ą7c[kvzg юIW]\ںeu{XB~)ܮI!ux2M7 !T<'/2]@IH7<A@>bY]음Wq9%pO$bP"wdZ'i>FColwKYnqzMr^}v2q5%,;~KwBff?wafKMrY`Wp8@CyȞǯ@{m@VLۯm} :=dRaKg C6dq]!\99bfqMxR(?VA_ bG=,#b{ 8G1 `t~{=}g]#5\**ǺE>%ꑣVEY|>%7ŋOa~'> o[|4>3}ކ ) lpt3kƲTᛰ[<Grmo 6oA)Q}SjjD-\R~M r>%;VRW(SRWNzLw-\YCNTs mK-;΄}Y_a]-J>oϪoYx8p_FG.'D Jo \Qhq% U*&u}Tp=:"6ŊGQtOpt馬:AX y~ cЇLDJy)z&C}>1>{wdߨGW>Ǫ0oڼEb:4X7+[>˴c~"bn~smj;/yI@qy R$=9Wu9N㈽Yجo༟+cLȚ o,j\HF\5@̥"~BHuu9k824+ nRA|'N?=:GI=^򧬳 a(VӐ WkQ=ɡ :!C/SLXzej՚ 2cn/(7OWJ /k&;:$ a[@Mg 4l?z9?2[kN:~ H ^1 bOQO?<_!?c|}~q1&Z$BYZ*\&[eUMAfo{Mifj}YgRCrZʬܞ $Zg@ WLvo87yͮ"~UwxvkՊ PU n? 
fDHD?!@ALyCĉF"*.jc+Ih9 nP ;%3"gqhWēٟghm~z-IWo"7D5c;GTY4m^gu"#O2޹bt#K, ò|B .,0EV :A1dV!B*QAUM@N7~OwНi<ޏDr{*<ݘ4,jQ[XZ]#vй r%BQ޹Q=0},뉟c$ IThy"v̒;-fW=ijNL֦sDaؐ >{1)n +'@%[UNGzc_ BG0.k0sG|kTES [`6T ,sEPBẼ BmF6[`Iw8 vI0# #ʙ!v( ?m);%%R!iL,J1Ճ#{(xٻVj[Vl+'nU 7:3al}cJr՗) ;':c4Z,yk\Sߍ>ZyBW`*Z}lD&zz_6#tS\>{Lϙ -9.h?I×;b\NB]J:r㛕۽fY::5*T_}g% #WA$7)k7m*vT3v`oV BzISaQFbN䔯ӱw8<}> f2Ў\! @Ӡ тu=/ߕY.s 4lIR!1\zkvX^=/w <ߵ>43 쪃.\ťu疙z꾙O7>/t=r!t|wbO0'F2u}7bLVnJw,ޕF䲀6:+uN!)éf|їǹp{x.IKkVcc7œܝ6I~@#2F!3%;0F 5F',p:?V\}5w,g?HxNn]^fg o7m]䎃Lϐ:C!b빽xuʖ'oNJ+]d5]/dN$ jzhՇ~SClĦt6y"?zȭ7*u pjc,D3Z!;zl\s$[vٔeGwwxx}f`E)8Z[>0tz)yh'W&վ6h#`ek ].mw%M˝Aܝee`OWoNv}Yd3 ޒs7+ks_S|әqXv^AvDAB'Gفj(͙]xqi;fc>[ ^6HdU6Q+7x dȃ Ap,C,$[ _0!oc~U]:-;K\y5HY5p*sq03>;Oz#߱}#:" P@-}DGPnaEr5 ŊEuNa0"YW9 8@[ 4* &#n46pbZG {,n1cǥff]L޸Lmə&2Q{b$eG(}c|J=S:|c""* 2RFv&,m_u0h( (]x1?0[:'4Z3ۗ7R`7*P|η v l]MdF$}uۤ_Rcڇ.C|VH/^roa?(,'VcLvstڑD,n~r] ' \d|r mp~WQSgnap 3ܝo.PX1`u;H" (OwFD:)b?oŃ1ҽOuE„> }wG9xTR솣SF.SWOnesc3}gL`O1f6 !sK]/?OL?/Sjxq[F¨j?wh6qg!\]:@fz1v>﹠>~a=׏KީKQv:)=i4􋛅{_9}}D!>ޯڻQF<>$B )anݴvԇ\*G;ҙJo ">==8w4raƎ(>#Ms >gk?WtܻSggEU_ĘsZl/А GFi2 N_?x4uyd*<]ju̩xeH-Ou: x:LK^rn/;>WSbLךW#${O 7g_qM$ |#6W@A7(_%@g$~W|^Mponc+&6MI:׵NYp`NK'fPLBԡbk6%Rc43@!4} @0=uԹљ2(GDC&a{oeLnbJoK׵ O4 -Y_hM6y_rJAנ!J Nφ5@Ei?:Nk+6 OIjʓKg$o…(%=#740Uzb̗y"q!κ~sOdv;4ikmTԳ0:FVԝ"sմyD66|L8cg{ȿaPswr|γ˲܏b0.U"% ]EݚE[9v\o?1:bnث+bR 7v/wZwbͳa3ns0:eT5":d=~8vx_E)w|9OI-筒f6>+ ڃ&ڵ0!i-^De\"JQ5&&2 2L+jWӝGW.$:2BFq;ˮkDSx5&e{*7$fc)/ݩMbIB Ut[ mxo?q{N+\B[ߵ@[`mZX9.YuH A\^e%yoh*wfT2Qf&9g`ߘred~j-uvFLVށ*Vfr1"&:!@y1Cϡ_,mkRs|=ԵFn<:,[Eutdᩒ'K;dA_O1cnZ6<:*_Y:a$pC\`R / nRՁK}l/VzcD_^3i!&(TĴM8FO\7JÈ3#T\c=Įqb.?Ju^)GvTA~_4(_S ӿVg1Ĕ{FD +BF8w5)]KHk%N.hTԖ3ݑZt@' c|G}j}J;  9;nfzH@U[ Lm毉8hT'Kyׅ3ku(,7h`^8AްBp6/ׇejL=d#%!?Iֆo5~;eHdotP>q/g8fFXo3UK) q4v!*fgw~}UNgeOƓȢ$Y,ns~A33+6A#Jnܽj%È^9߇{wshvz*B7&( )A=#|r 73? NRF0ZKO| KN[m>jܧTL2D$ ziSW#Q4F{:'[Y֨tsT+[Z5aVJyR2L󃪇qp@AĶQ/7f)rh/&S~:a SxC$P$YBY}waΌ QHEnkI߹Pdv0:ǪiJJXQ%nots~sQѶ'IP4xJ~]v;|C!uyo,5i'Tp>ʆ=ꡕ36,2@%"4}6}ﶱ%$:=w'4*|.r ߈((#RET?x"2מFl#TBK{nr- ]ElCiO3m]nr vlFCQߒlJWA:tc<&(]mfx:NXͽ1$t#\Tp\-e8F#Q^{a" ~gy4~36ky@pcj.8'vx-JX |5z ^ c!}Ib*f?4x(>/d:adbb{/SZ3fn?̈́j+_<$?i])mٟy̚8V2"GDq}\vpEףmX @q3Z?O\ytz%y$ {n^U"ks{"|cuc켗r'+r?P.~g}(yAE@_l[bmY::j,q6h3H1|2Ӽ9ȹ)}/s6Ǝ#´,DgdHHyRF7M!:}'3yڜ Xkgz%aͱx5{[ Cݙw&$2)f]ll}gQϑ7buX-NaԀ7:a:"hʎ<wW|~F5'-q-K q` '\/-"2twvW4(h~?1#8w aY=H @!1} ?мk'z6ظH+VŭaFbn師>rZ(gc]*j-{Y=!mkI*ex$ PfȫCx*$n,I˝϶_L8 -wLV4q>w9Tܯ@^lnVSMWpУ킎 9JG& o3cup? QKY5릧5E3qaty=&Q8T =#3x{^fvJ%kvJR9;)$zĶPpD1s FA 8/_@`H}??? 
gӇE3g;r|)껭3hx/hխM҃NnWAz3J)贯> ' j2$h%o5FQA$<0.z޳-~[Qy/7eBj9a"{C+95ԘBǀ&^`6soAuB?uwR7cIqT-`moF˪LAQE3>a$hDE'/ϨjM&FZuK)5كg T{?uo Twn~71T;3qLxA'$&kG`jNlW/^y jA٨4L{\~R^Ius̓TCjzsX(U|A@ѵRgRν8s縼>m2$58W-9)- CnQ@BDDiZ@PX`A=/Agaxﷱ1σ9#KǓTهEv(ϋYWV z0%120_?'Pf 6F$`iP<Y磆:*ㄜ$fO;6Qv}\{;zݿwᐄ!B!Buy3OHHԛpgr̲!B!B3J 9kf1c1c1ş1c1c3a%1c1c , (1B7o/V4I" aqWLxdy  7F {ã^.XС:7iFc/);hi\QQ a`ۄU2SpMm B<B>|Ό/(mBGaAW)(e27DJW?"~ٹ/SØŌ2yI1z-ZIi٫+%8gq|`& Ѥ>Zf uS1Fξ6wq ~S~~NoA@-n'Ci>pfOȔ҅50z6mMa Oָ0b\<+E@ѐo703JaMt2 0 , @]-8JqQږ\^֋ȅqg ^/W州p߇|8Ѯch?4Oq&⎌)Y;Hags驦7:bxO>:89:W-af dʮtp183*ѭILg ռ:÷\uJ 1Hp50T[*BAj=_{\wqZChDukZK8zG#[)J{`oljjjjjjjjjllllll888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888886,23QLRRr̖m(Q"{~mo3 e=qϿ3ǒxፍM͗?_}{zk~ +N3KQK/%0(=kW.ZrTt[#Y3 o?O?`nΘ`rAC_@h5 +kA_&h6 _T1@i@Pa'}ͪR 25)!Y?y\ey{I} g}* x'^ AXR}?`}p2Roz܂"lv͊r{h+;,_`yXvk|Ik_?t#^:(^1~:@,:=͸1򗼲gU0@^`|P3Gy!C0gQEi|^:3vrh)n&egMmνa@AZy7n31m7"!\HY:zw{2Xؕ,_ PRqh/Y(^}0lX;n+I/{SYُģO&vEy 5&SdM+ zEƞNa VMbW+QFHt&cCk{x>%yZ`wIEKkeJ~^rz9 gԿ>x)ziCMԅJN' us>l ̶nQ-&$y 4`\#\h#~Od?T#Q5[w{tZA< i , ;N=XD:vk% SN$No\`duV[Rk% _y90 A dHOvR.?}>|\6-ܫJVO̰gw0KhwC9kjԞf(o=HA-}3:mo9 ^^dTl}omŧ C Zh|*>i6 "EԩX yL6oUl|W/wGiO^ t.dxfHyYvRXWZqA7F IGZ6YsEjgeN4۝E42daC:eu4 BDր}jM p`2wR%ɝN|}k7'tbi"#c/˯ꑱmk!CYDDiu5/fy 7mH [/Ώ|SݬPW\ARԇj:;ASXܩ5J3b (s/уr\&Bem,71LLR :"""!2x?t*/`䘣=9;aEyx٠/@Cxڮ| !od}$mw JJ_w-n:\8mgd3zVEʗhl.ޚh]uzҕ%g9\k35# PPek5z*c4*Ef6ƛфg01x x1 MFLhdQD/  =qku-4d^ѠlԄ ġLe !b !$#"" !$m61ia$\BmM4l]]!TRhq@ɨsd(h=wV ~⏭w"w)'c j }k|>s| oŻU|ԧ0t|D͍ZߣB.vY vdP X{0n|\òDbG-69a0zL峳3cCJ) v#Gk&$ uQ$a=bck,xĶ9/yB |,V7UB"H퍃0jпƞɶ-pbkv}m"^ w6@Ð̫i81 Al% yʱ[\޸.䓙LhEpA[eW}~K gBv$$R@j"Gt  `aa 1!co<'`QiQ9Ō؀:尐rӮdP:j$ΣʴviUdX{-Ah1j3tm!ft.Ե[u_Lg&ڭh ZV e6퇬S8ε l1~` /}F3KdaG7LLLQ9mƭYk)+i˼ մknc BB 17֤SNst\UI8٥b3,YyӬ:*Zs#m2+aM`[/XhZoǪ?#^<6p#Y PQL-$5ՔVpR;EHjcοQkc{t“71, 89S)/ X) TA |T/MgxuRqDB' 85mAH3 1GU&@ŧ} +5ϼy:;\_ۍJjrq)U ' =1\ΒY0O-#I @ nt$*Z(&  x=HKɞA EsT+Uvl`n_/<2cI'mLC;q )#EMderWg+T#sEj=խDg1qLWjE/k7l'ndnSxwz9Wis@ `.~3GOBX~40;\oVA>ΝA=1Mw@qDPo-Ϸ m3 r]|xVɶzpK`eT h(]\=8Nk#ae}p#t*gCZ,3NfBP>!AbAVocTR#aqYyu>)$,άHpYuZOknHy)m!|=6D3(`Hg-VYqHe':Aq{ d QzF,/*VuXj1 nAYFo=X'(E(}4>H[ːl214Y~,@MY! HQhCHh(Kp4=CbH1O,On  JoWMe\noPbs̡jQ_m6!F7s*B#l~96AG \@!<,T; C[>?Zޢ[|~](y^5âGPSD0jh3kXafz,j6gF_#FyA#]g pXU Њqw`Dby-)zfHI@UݚOYd=c@0(oc D.I$dD ꙤPiD dmj{pHI!^ J?CT勋>t-G=;aa~44BE 75EF;ؔhQ!iγcc"qD,iX.P#C ` A&ؖ_X8ڦxP('#>;O)**ޑ "M-jCRLP:I"H*&k5a4ꜘ] sFF:>I$cTʥha`0cBm7@LJBIdR$ TI $5R ]R,XơX4VJp ,&cb!?MQRDK8 =MSTx\]b)- 7)&ka%@.bL{,]:67)͋ DThh\\2F(QEJ"fvXmרּa5X@ JËk9NE Hf@!xjqf+$!2((Z7CA `cBjL1\YKbf"6хX))"oyi Ո$iFh2i+ҊR0 dnJп6ЍpHfo ,R){AoEo|ooX&,igZ1YlX!#ZRo1WQhk %a S2 &CڣAYjbL}kree!@af bpmb"d@S, M@OׯCS=pnFcg_ۼ3)* mmH"vq AoQ05WПH?ێwE$? 
!`'U^Z+A*$җ$)0,{~Oy>| 8={o\ xb2q0U>܊>C^/mib%bV^(ʅlUqg--Lz/ 6+eSa_)2bޜSMSZ/~&)'S~/؏~ocKNvb'7ZeFf+-3&M2̏X1[XG-ZZ-ae4Y0 0Xa3e6pRink#1mXY9ֵsYU1jV6 9WomfͰd̳pXf;%U-\|97 VbnmR1g ċim52lФr I2LXI`-TEkW**5kpBR!6c%i% 2fPdc%fIf dUK"ČPY&TRcLX7f S|-e*6QY2bRX` Bɉf@Z &O'WAFX Z(q{O_ qlXE}VO%З⻂_KE[.)!$U x]Au'cNJx7 i ͡:NUNlD4 H$5@n/J^O;$z |U#:S*pseX$~]|/͙^jP7 0H<4cO+۽ikF?U$_}ʫԢ~ID}M)i,-,"ȱ,KKw¿߱D%$Q:D^zDeEKb)2M)؂ė=ԥJM9I>Oi>>T$2MOR6qKKvr zFtA*t-s/'I$E𝶱zTR0/{P $\Yj*l8G;IVP?? n]򴟀>=_Lj0b~@K7z<<<,,+8%ʓD2PaÝ>4h-5>~NRszӅw{yz؀"wc*sWWWK\I,аIw.U>ARBS Z;hl$KpFT[JY#F 'R1$MQTmovTT}.Do9'_r#_ũNL"xqz_`vI_QGz9AێKv^_#B3HFy:R|Q?eUu%_)nD]1~;z?GKuB\eIRjR?1 L#/Q,5>mv!'xZQm@h\0)=.:zSW?-mH+Vy/])V&C/VQ+Jr>;_S0/j'Z-!hP zO6+RG#oqB.r `ܾO(jÕ |fWFqbn6#bwyBPT)e2PT*\-Dk35s}, ) 2L$`(V!K@41&;C]lwU  *מLͥmRH*\@S(  J(H(X@ Z)/w"R@UQD B#PP`QԀ6:.z{=w7w>n{>w|<"v}zزgӻzn3W϶{ޱ^:=龇] :;_o\ cg}gWݯoW˸ww;u72vƹ==ݖniV7 TR*PlII* )Orw=ڭ؝3&{[gb| z=@}Ukc }>kUmgo}^Fy{,w3^n.Ǻn'|{'_}̻_>5NlP"<}_wM}wW{)Y2ʄh%JU*UQ*DmT;4TJ Qau{7,}:o)e۩s훾^wysͳwջ}mk8ms^p>յDlˮ\}m[ξ}}iTak6h|| (PT(j[([|!OqwU(W}|cM{%AB;:{zw ^}F}UH*%\;=vcn;{6R۞q}v&3T$'z}M}J*^w^t!_muc@_e(T*;wJ@3[ JW>;=YƵ3b۽׻wsy^4횞<ϬF:_|D>Yݫ[An=mMM㥍*vSU+Z6 l`&-3QnU悮#֪SR9c3m.RĉMmdUIEZY*Vڬݩljq,mfR֛ZY]Y)wMJ(+9m$f*֩ZmfRHJAj5m$RT֭  QU$ҤQTU"T*Fvv-ړ pwwNVvnl[9Mm1l!f᪺l1fk5q9 6ٷpv㮧3s0g7mۗ۵nw]M,u6`: dA]GVh@] 9 mZSSCCMh :4t:@(WA9Ŏo}W8H;׼nLja4 `ѓ"xdC&FL LL  L&@ 0&d4iLA &d1#CLdhiF@& h !F4hDЄ MM@ $@FdhdѠ4`ɂ00Fe=4Ѧ414ҟFM jRHЙ1 MF4dh&M5= #F&dhim FOSUTI $@ \@R8-, J}T*!$*z-"`1 .f6 MfE Q *G I$G@ S )-uwĩ#nsx_~N.027b$Tеӗᄳ|-j [f͐gό́q4D@64ȫ 2 )HDE5T19W*,\s_j+-Z*CpF~F٦ŏJR3uMM 3/FJ 8tSPđ_A"x̹,ddNRܼK%B !$$ Č$a ##!I%cE BDa$Qn0$BD!s՚]qp"QIAPjF@I$Ӫ|tH  HQHHH$*Et.魯ևr߅h0f} 2mt4me09$?7>oDG\:y|R&D a7poųgS y V0Ye @#`p@ !,#q?6nkTܒB$7ucY *o7"aQ!u6FE PH VQuČPl^_ ewi"q PaFp:Aa2ƻNq}w| Mx/1wmWB8s^±^w>Gt^q҂mrf4//cێ=ja3S)_%EF)dzXs`3rvj"CC23< ٘]=06n+G|q\rcRk 7-w A-&Zn>ZЙ8Xo w]dChQ(Hivbh\@26u)PIRE K*ٹڗ,[ULr+ ɒ\d0T.%"6BJ7kE$R(nhZ1>\e Z:kHb D肯&Pצт$*-pۖN@NdRAv*|5rzQ@dFHICZ6B*TAݲEEH2"",@B0a@dHbalájB#B/T`BIPdBиF$$R$$ 7ТI"##!I!$RHI$dHI   !$d  $a $$a`D$$CI! !Ȱ_}Eq bf!H2Pg/1r$XFcDV78 }{O)تN$wb,a$BBA1' .ޯaF$idB V$HTCH "B,I(hRĈ_H 7jZte'oytK ]|r0Q (Bmi%I$54z͋bض-bl[ű^&ű|>GkhMh6Izaٍ1{@“4E;]bw|KwPug |8N%rmIS"Gmh$2-B[yQlӝG;r & ~h 830ET /~B )Mœ{P}$;|9y>޾yqxd曝ɟ}ßrsy7riɹu73tCp5y8mOyL=uNO窝|*|gjD P*Dd^\N:?ʿ9en`%yQD.sV W^츀69õ˔ɫ9\ wygvEo'&QP!r ˎ" unCjJ$-d=OJZNAqŋx`۸.3$$-߷UӇ|V@ f$BDR@:CIy? 
]$.o.k^>,zQ˔x0.Tl ƫDQĕs8I%0pY[X4Ǣ= mG[:gfA8X4nt)pp3S?3⍴o/F럡lw1|3>+i:yd!)%))H)!<(Cֺ0_ݞhaa iuB,#-ñ =IްC@ʣ$jBۊ P& łnlzz y $,A;ippF:Y$4K -r6 dl92cD[l H"S 5N5|% A:[ (,i K)x8k(FHiY0.XEdk^B)IELQeE  E'qJz}L 7CDPh-GZ[C<ޤԬp a|nͦ (ʪnnPb>8ع U)PO] ϥ47ѯN[KM%MH%DCɧ+75$5jt (mh H *N2M&@#s:A (pmlғC`Z+bJ;Z")u%յĕŕcgH4a-Z3 sSbʂC8ElqkIjPkShJhdC#I@-E{,eDϲ4$5QkcLnK"q(ҖPHR1#B A t(v&CQU8-93c͝yۃD]-CE7|e?L4f$wHV*ӡtж&>g;^u:!:ƂӘ0m%ZZbTybrJ/4!LA\ם $ ݚx y^]:-Ћšeږ* @c>.i)N8ke+Tk^|ZkO+!7`&jKM:pQs]"|̌$ENHX4V{'o[^Y 3­j󏃄^yÁtFgIpLoͺ.ɽ9L̢Fhf#F[jn֣nn}AMf+_ID1_o -Dn0:[lvm٩8-Sq|e 8@Pޡ:ӅQ ~|.:Hk0G3eIv yDe929,p1kyhZN:ocH=="nE%Q21Xxe<08 C2b:gamXpvY_KLIBQH "I "iyl;'Nj=I#W5bڈT-H_ssW/}I!u<FŃDN :\g1A6zC wq?HѰ("$PoM}41Gwomu8dkqN4njۻw7q/3Qr4*bov-eGnq_J8AKeb"lRA$U%Ϋ!,C4HHDHQ.NJjNR0obHM=R!8LIm#K*$|m}tlrz]p{q8.*(TwyE)!;~ nF>sż0Q,A/9w'rᅨ;kqy rۮ{.u;f> M8W/I9 8Ye6,Y5r)Vs+M9J=zBo'KYthN#<~RCh4hwK>LYiW܊|:z37s lw kz\Fͮ;̹&BgHBHp=4xUy"uչ̕!h6=)X'S~7y9hu>).&yƯA)@7>$;j&IդF :F{R/Ei9jmRvĴ\𬦧/XDAlT}DD+Kb G0n^&tFhJc-N5F#݆BƻcZ-iCGn%/h1PO{A@ /\*19U†A3Q\Mp8nH@ZnG_Nۥ oym y|a+].ϟ |-f~!5PTz {F>BÎ f>=]l!WQ0XE/#TY zjdzWSVip1ZhcWĮsF Il7< DپΙ:Pm٧ 0nl; hLq[Wj̕[kUo~jRnM6PEShm C:f&]QxcԔRweEP%Zb\=dPfAѢyxo+\ |:wMUuaqoeiT- & ABvB +Bzh;:s,3ÇROc` ~TjHy3do@I 8Nùl]y5>,<-'qKWq(6G\.8fCtkvBLTHD($ulgS"Q4PPaH8 0]Mv8dC!GUL_p/϶Gpx<xh4縃'ٯET#8~$H jCAqV< a `_s0s:ک'5 +N%/CD9 OָN?y\[c)0{n2?2q{Hjs,߅v/X#bFrL ym(C1 FM*0R۰GC+wnZ[Ws,❦=SBsCSRo ĢkQCG集CHC|p[ۼ͓{'#wHgxWFrgayr>&uHxo9>v}^So\e5xˇ8gh|q^67kuǔ+Bt7p7,:cY8|f./v6])le0t\ԂR7 vm2_wn83uzLSrhA'7JCdY{"/| gmrGGF.t c(.3Ni ZbZ,Wj:a5NE{6p6p"@o|P!f3NM nC3jJH'&By[)úndL9Q~5]5uH; u`Bn4,XćRp,HjsK-Ѕ),E-c1!wBi14O^pM4F)RDg Ι~cnPt! $rW[]RN*XMǐ?k\>]ݸfz?9rsfT`rQ5UJ(0Q4P3#7(#B?0xrnG/LSWٮYjC݌"k;c ^SW[&vNi+\ l>d&1lY&>n"\@ /=Y3HSU\k78'C[ZNCM3M];6xӐ05`w>?cyݔڥ-E͵]u <jV ,Ʃngp ·7v TNi2z|fnx&lV|K tos)Gy9P'Ab$r4DBbQ[ųM*gp <]wٺW2+~θS$Mb9`am*W9 .d謾me薍\vdesF߾2G=ExqK2ޘoZj ue  `ٶC>s2y zq$d3Yq|\QlϐQDJHt{ܘ>*SJ*jHgdPX& ;kt(FP(tPL`k6ơM ZHf/׳iLgo]>?l[uS~#ެ{D6\1vEM]@kc YHe]I@Da*/ ʓ?XZX%h `3>0KƓHp.c"`eLcjzPy}v5 u74NF Ű0=8C`\6 sТz)56KNzc">NBZI$.P&nka)4 4I)r1 ,)r($ t+haDU$(H(ey7F'u,! d_жC2PCLh@Ё  BB$ @I!DF@ : d$!HHHJ! dd0BPFWap1zQarh٩\rir^ F;Dm?ABv::%goog^ד@BBFFF Xllv;E$fddDDDD$8$*击fFΜCsXc$$3@$ x| W ;z |R֥q8R\\%W}]B& Hz>/>(?0/<+44;27眗g;<=CB>>.CȐ[#TjHf㎝YCBIRI$`6 [m4GfqԞP;]yKLm4H9Ο!!Pp?e%7K Eaud4Ovm@;BUV'Qu80D0Gȝcv?AF1,L_~kr-!OQyuЅ/]!Ӂ]l! /BI$I$ifN⛮ 1N!F``>k (afq(L *YRFQw<%tٓf~vBUXHnc8!E#d<%ANJ-r}kGgP(i x qU 2՗N|H ^~%k00h R (˜ 7D; b(\6i+CCˆyJMZ=Ρg4KMņ=Y(n#CIFMIqq@m in_XRHwoq.%,"[0H~<($ #  . KpUT}aB6@2pddO +q@ˍIMʛ{GGn4(y!,A G[Q墒_'.jSXA,-43eZ%{YE' a͈8[~LSdNa{K &GF}DK)1 xӆ?9$NOx}nyZM/{/v&_i=1y=o qoˋ@dPYP`R P4U$DV DA %Yf)AhD  @ D" ʢ)!KY!Q$\I!`!*O1UT#i QBQSEHul$ "("+P^TA/ ")1%$ D/7)-@RQJURQ~zU $do$]F $I*UTCVaaZt*SQh(h6ahA%BI S"I1hP!P*IP$)TRMk"UB5E* ,ѤTE$BEU0/" kB[DȤ.*DQ!eA"  "TT!(AB$((*AR* O ~!R:Z&lf6l0ٟ{Eȷve]_Ah4 llllllllllnZ9X9ͫF=27[ЂodD  @aZ8(@䲀FTXP+B t_qSz>Vw |K)>wwIrio=pȜ/=J|*ކ~2_H^RvOJH/Ą7@J,+o%VS.,FKܷ}TtBF@:ҧ4Q64+W/Wc'xFa &r,9O^Ԗor%TZvXD(y26%?ӍHwe{BKOR+:0`0{{)h:Dv𳩀-7懅++lpLOgv*>@@^Trࠆ18rXFЅ94 "A(H%욺`l^ ];\ DG+/dc1YWZM=DJ"R]=Ł/Yޠ;Gc#OZ \v?}VbwH @D"E`'PEHDU $$D$"#") $IVEd$T$* JkYCF(yb ,U h[zd{* @wC/YKo1k/*f+ow*%k%daAIU&0b)AU(9hXR+WJU"ζ}%&@R- ؍Pf%KӔ.m9uZ09AQ]i('%,joq А 'ɦ7]P: #k&)5|;Z}#5^sNfDd~_NP܄ 8O !B`BD( `D_խ)_:իvգ{[;9DLzoٟW66L0;}&v.#LW͎"oUnl4vK `p8|ql4ތҔBW, Ĺ2-Y-Mv9lh9Ch(x QDz .Z~̮k*`H(=Y0 D!חLOx}-(t/ W>D% (-x0p3 Yk_ ~UЎBHureYt&4`hBTzKDPUPGl:B)Qd7Z\;FPz)١031eii@: .{S ifip*8.]8*&Ln?9֞4]o Fiu}Cv^ (CݯŇk˪̭cJmd,&9w$z -BߞmqjĭNq @xlbHS|eqc2+UQn} _PGhGԚP䷵PuvԬ<6dTc6' leCY[L(nBQ)$XHEY@ @"IB@$HA$$ !! 
DIDPIRdI ,$X,BHH$D$H!$"@a$A8ءĈ'`^& "(0`H##B,U0 B"I QD$ YB,hHHE*EXJ@DHED``` B@,IF!JС !hUOi/@E(ء-i<|{5 M,瘰GAuoW"nVi@pY`2DySPbiHX5:'VF0Hg@4fꡡ;PZ+uXWfLP _@;?qQ^¡% <ϱ2Y{g39KA(yz>ݝFfJ,+Um,?!-81IdF64oNg"RB9<ߍyWポ]zj㪯\!}NA@0@HCZm%  ~ i5NQs4nZ m= x&˘.? % cL(#+#E~/ %5f"a>^@yHW_I@`%-iDQ p 39cE]O-N.ΠB` R>+:wfE:wG Qn cw7 sFl~'l(!J 00%)\4GbLOX}aya($.R`QD?} 56"Rw:@{Z$~yv~Ah}.ڵpp o+BP0_A1VyMH*o7@(PQA&FQlu;@l;۪4YB( "L{9Kqp-Id0(sV3E pu0*Y-[`eMD6%bN:*Tϝ9=Apn;/Uu9Ad)&ZDF(D^Ā>$$@$!EdEdE@EE<DH`yaCA˽]챖tj^w=ŭ:~w=UQъ!Ww )%TK[L!RRSDD-bztQ5U坭W>UpuͦT ؗdbz2H! IVw crz4*Q NwvӏU^rK̅nv9eed?*}s#$_`/h,wzt&p O7#G_\)£iea^3q~u!ed@,_m0yxcJZ/ak!FoFwRP"綀3)Wwn7({3|RR2=Jx P VLT}0CeLemY;ʝIcb 60~2Z,6RMmχ*w)?-? >9VmJ {y3]cC>ܾwE#R}xUfKZi Gr˽rQhY8$@CA}2FLIhim+Q句v6+m)`N ͿDB|q8S}.G\NK.|; `lP찼~jg:pM_)[8W~IrM|^"0 Z9@A1 BA" 槱7XȇKzޘ@YޟAv3sؒ V5mE( ~?J]~}A98--UM"ߦ)Q58 hlvs;}B;\)G*ʈ`LWC$/l,e|"ӍCC|pD-3URH𢠒5זr0>!)jcLs`Kw0c3('J 1]+L J"0 kHAsWP795<-vWH@ |?smʓ/븬!DϬg"HcT $A=[…R"Ȓ遰usE‹;g!!0/ y Пp2 ͥH+(bs=N/ޕ$7Dؔd.Y/.O`?d7QJMdޖ_ sd`T j82֪oK &}N ߃/w; QVa5lSe>I8?@mL<@BӁ'Ϲ O X>==)co]owDxN r4ciŀ)DM3}. kw*b0yBmP&}IXN?Z0-܁*@YD[/z)J om3Oz %ADf{[ 5QSa "lt D`{3h|wUlT@G;$FC/{ : Q7Ym^Keɭ?f󭾱NWULUA%JI$pn*HȢ=w,$J(u{i9㴴~>[}t'= 0Z@YUW6$/* n1casꜫYט~fz%^N![Qܛ"GCP 3;|FYΫ}tɝYW]bj%+Cb bfsUL_;h:<4R_nϱLohnΣC{[B)jd}Mw9 <۾A\MZ3|%&< P|@ M])vN۾ƾUj,P`ECg_  Oy$v??soI~/>^oȗ |5w+O Xڅ+; rzтk/1$70 6=dq\:v*eas;'~] ZUb+([2FItikCGl9,kޏ'} e3~V ެu.}R9DgΘ1,L0##{uB Q$:(n1A7d##ޫDߺdVsbK?kckHJ5BJ?=JE*icy Kbڹ5ԩKfEe_u{#*..L+h+nsⱳ] мDDJ(y' jQL 0`+ _=ώ5 CN(N0%6T#Fd)YXh!2 4(NӯK1zM:y}2sj37dQ <ڜކ@~JI~TV6c@z_괺?<ƕ,ސ$kF];}h$04ƋȈGTD`  JxpcqE$2s5$ [=9>~Ib)݊J/tQ!ԄUdMyKo @@a=;˼5[9CyYpO ޥ#f&Z=ihR2h/E^,sBBTk.z1oR)P}-c갤:][3.m8~pScK"j//ըjcw8`t0\F0@!0 Ic]P;s \/.%| NEꀸ ]49Op]|Լ"_?[OvЀi_zV>Xno0t7tBnY$nDepin.enbXo Z Yr e,Xbŋ,X- h(R " )bŋ"e1`@[BpbB",!XRBb'-i~rH{-.!`Dn[[ @!!,EV"- Jܩ,AoXu`[mK=r ,,\,,Xbŋ,Xbŋ- rб``-J[- b԰,",x`P  [(K,ZX !^[,X"A`@AhXB Ar *"@S(UbbDHDPzŋ"ŋ@D 2 eWZZZUiibšnƁzip[+eb@X. eAPeŋ-ʖ ).[·,Qbœ[. 
l@F+aҴj@,S)z R1.{1ɂK-e̦eS)yr)|e RM*KԔ hACd^ܧ@1W"&( -NG0 r!Wљ9ЙѵB"#jL/s]sQDVs'@4!u%g1_A PY)⏹R F2J@!,g"hGqv_V)CO{{"qܫ̀~1:T{txUCbӑz*nwxLBh"`XëU'&?d+k &FJo3# Mե]QtPR/'QR# EWP1؀hǽ9 %*@Ўey9TvbK1mĩglO s@r qS;/k^AZ_XJYKs>^ZU.T CBu޷~)`+nx ?Ap؄^2#e li1!A=pd`w=8cab=.4\?`aEqnC^/5/f?T~@/n59Y){@H&]d nؤKȳ7]tҊWqsVG4P9`A }rX $_v/;?OM ^f1/>#du][2Br vYjQxZ 8pͿ>fǞ7)} @SO2l/4mfӝOl/J^N,z]&0ިgRpG?:8>vD'ڂJ} 嫡z}V4P<*w 7[%]ۀT!*ʑe- qN+Z}Q3#?~rw^M-oa_[~ q-ͥbrbJ*ƽf#HPIv2TwH~q0&PR+¦F粊y?J`(OQS}h`̱o&HI^õ"X"ED$# X HRAV HB@$#fQHa!!!BTHFAU$Ua$QDS}_u;DxB1[O[3mcO\Xq- $̞|cOw|m3*m4,c˼NASpZ(鐊LP&MסiL@G9_Cf򦻽Ah%W pq4BS5`Y;9~RG;ZLى(oy+%K=BHƞ~̏ҴwδMYeS_6I\SZ/QFx%n|h\`z}%Y[i$I.T# %1(#(>"U2xǔ^GcJP0V~'Q 5*uJt=;+׌{=RB[X3,T)z1ԧ7[N$tn΄dcҁ]ϰZυ4"2⇃!Y$v/"+[=QQL&i$ފQ@ $jiRJ dG"prQ^OLp iROf'h6PYP3(N:zybX49̤E x]WrFAR&R>^S$U``q#N-ROX_ifpSj%y:UDp]|$4'5.q@2"eFI'p>+XL..7hUsdnʆM۾)fG 5@="o(/{Ci+SզחaƂX_L\jeu׾ tMOp65A Pf0` 1 qB^ӾɎ<~_=>lf''(]Mh5XUߕHy Gތ CF z0Wdhh^Oh9I@WpTHf-Ž32\`1 ~o0XQiuS?>240RtU{( Nt;|v>]gO˸oϡ@oX:ex-٫d㒹k9br jEƸ55Ǔ.fEs@ݬK{TEr:a[Ȅ,aG4,Xbk^0=#qB޵(LbQFͨ 0leA qVA7UllN[YgB toD@+çJNlTI;,s %H_p4?$~Q$}gd|Bx:;Y&[P&$4<~C)}s zRDb<\,*^"::ﴈY6dcl-wJS'm㐦u` 6 .cTbx G˨g]'tdZR5$9U6Q.PM4egQsV#a@QwK8lc1a`Pͳ0_ހ }s觃#Ox&y)UBc{0mWɔr1Ǥ8kpJZpNiw!zz@Co62t:L_goM3^>!Nለa˸|VEF!~R}/)z yjqC::Oԕ@Ļ(w濵2G1e&;iTҾà fF".T뢍_|P`ƹ߃,cP%iR &tTsf)du)Yt64O0aHB ii}.QOw'EE ] 2SfĚg{FӅ4P>3CEvm},Dۤ,ӂd,Ҏ s_QU37yĢlTW"MKXzO[gQeۜ@/_Er%{Cvׯ U P@W-A$^?|p9v#a&BU'{^߃N>¿d{L阫+jPε5kz4a~ݐU`$jKnz[S?>3PRtY+Dk%#^)1Q[ ݽևl>Y2C[VpW496 MWMx޼5pW@ú @=ad7HV;`f͠Eլ>YX^fTTrеh hc5YCBal72*jyzüa[?6kxe|]LV3'Z* Qh*4wEEZJ0I,$rɼiٽ'w_e^k;1xS+sY/``o;lur& 'L7<2j-)}\ϐf[P=LkT6|~[J~I#SVe_84|CgfbʤD O⯭"FSC/e :$fԣW/ї" ~sVکmjSoK;m ,< &xe.BsgmVM7t65*ffkWތ$%>[>#$Ai!!Ҏye1mm(ҳetI|֑M8Wc~ k8-$`ۦC[n@; >} S6{(PP~xĩ5o4%󭡉2 ^9H:)hj^H.k}Sk2 iUe20|:< _Ʋ;&p%Q2 y:Y֬}?"xdp%Ԍ:)P'a߳`ŧ|6Ql߃ 19y8oеM G8ݨd 4H#3]';8 )2'_ DlO(h88od МpFB~Ga+jDuY!*\U|7/j{\C&#`Hz6n+~J5+c%S.ΐCZ Jtvv4ّǒLeλ}f0,:c{[aFd C%Z}Hh-~hЦcsuu _~VfE OT s} S E q(F_Xgb9U &x`I]Ri29'qcE$) `8Tv0h3ғp_ġK?S v{!MLv+Z/7J&FA4i沣w}?H#񳙂ʨE Td"07crFnAίmYᄜJ[j;Ԧ;.:`gyH3UBG#*a'xmS>otނ̔Fտ4DP8)^!37N/LDz 59;Y<.tqY)վ %L{i}Jqu ==VaOnʞKS]ZuX0;O:Cb'Zx'\W#.yOݶgBTΦU8^ԑ̕e>l+ZLѡ7]8 > =%Jsq$?)@@vZ"z(jۓ {`K>n5kAkN!wzUCS&z5h|tCef,J(7surKS#qKYlqNҴVl6^ ]Zg>Xb]sn*vr*U"m e`Woq a:%܉Wc|oEv#BX=+C|a:7Q)oԈ:p8%I|A;žMLy!h>y. I rXŸ]51eZ v=!zm]Hhj )pő M9س?8_c1IUAS`TՈf !Ysc!Kgh%4 DE˜`6 i@.CS |d^@M FyCL h1TJִOL8kur÷k-ĭ,MF6݆M iN$^)T90hX+d[oy .ceDc"WHhΥ55-ZǴU}*0_b"U͞ۄtGWA/wƳv'C)E .1J_;-|v3V_uI30ZU읪pRaf;E='S"Mߌw?Zy)@`!$Dɲk?#S# 6q̈́F7!*lBsYϨmh4g /&ļ~x▗| Ǔ8A̤]`vy\HY*_Vs ޞ0jד8:6㥛*,,E oB^&k\-L~uÓGk IJWֺbb PIt ut1xĒ;&>iD} RhK,1HکSp.#PeF@ҥ,G*xb=S5|2/ItΈ1|]uWRH3̌'usv{nǡ-)vG$%_^ [l۬BxPo'_RC8axJ( ~Uȴ%lrQ~Lomk?EAŶKt@#M6{01y+Kݦ3)%V"_EObЦyR3?K#0D  /Hp?_yU^NW?1ؕͿy۫ldLHOf?B̖&x#\{ Furj m?TYY>hyT,c=k)WWRЀwVe-v,i0X y,% -u/bM^BdJ 1>~hpvC(sG(77ƗIL|Fs}NJ# np8#y H2ϑnk/qT0f\4-_12zXyq0hV)|6SSJ`-@O-4˃sP×ڳDx_!%{{$kT!Nz<\/|K;oBH"pB_ nUdž+\^ +j]^Nk9] /Hν=uIׂ֣6q8l4g+ZmL(t84:b2`9Kg^lF RVÂ;4 E05!j/pqMul$̍xD lhsfD ep1+E2cu|auT?Fbɢ0[? VB'pYCg)Ԟ` $Y= ǢW\zvU xfz/r%9}[>dUP>K]pE1>]IݿRw$3qI p}3b(zʧa3I2H .g{4K:g.*jhr,8uJ+PNJE+Wc\? 3 "rrUAwKxc̺פuE`oX]xҞQ-x$7 :=> a,Z uޒ6cL;F(< j_D4}gqTR'pړn2*Ch RCA K1C4D <-euW Y;y(we?o.Fؤrc90SWPDܒ~` 4v/Ao(jI B#]g'#X?tfڳkCHcY:mu6 ĩH.l%6|kWe)R4R<0ņhͲEjzx\ޒ1/yӻDz@X( 8N4,e[L S{QF ӲfևRN!@ggYly{K}y3Pq佗h|i+TcZPkLxڍ؝st%y;anK;%c̭);Vek8Iq+\X;r!r f - E񚋃ᡃ<LiğtH e;}m΂d{ʅm~J9hW&wki>b0AfD9IOc1MdLݥI&uYN,5kXHMF2Xt!END&#O eu8ϧZ(͕ia0@+l3>. N.Fx$%bPeR.2kELOq"7|֗MC &6Wϑs+&wy4VooSU`ف?vqiQc@YہK$FQ;: 4 klڰvĀJ!AC6ᴻN~ih_Ix<%RUZ6E>EuX63HCX'?x=G7kλT! 
) ha .EޝbMacDDhR223gzb'tHyn66>ІW҅×T9p&( o/!&gXfze$aknB﷪H{9wH4&T1zىtctWC 4hch,:D7riц*GfWg07ygY 2hEH/vkz9)pe~4y:s0VX\' 0j)! cc*۶՟!7ٵ~ʫNy` =1 cJ-qi\(ϛ{ЊBSe2$VN^u+̻MO:F.EĚ[{QVw*Q̆-W,;+8UuǻTW_sdܶܯ;W~d%p>?}w9ʀu pN=!P=$ WۡN0C!4`S"m-@ rm-}v0E#Nwnv?j($D 'EA7|&M]F@o W~C `3ߖpt FԩfꌳfUm!GI\ ^p!Ƴ%z6̷u]Z-S+@j9 dJ3$Ӝ+uPڪGdz- N~LCU)d=,K8nW\p3@|vi #׼Ph暼=Z^FI0DUYm02 }~Rd`mVvŧ[ƲZ@W+"Gԑ |jxS:@^pn5pҏ 0T#!v+'|껓nz?s`#RyddT<\ /6x 2#\NteO}/jj~+#BbQ3t("L>𨖓o%~i,eE4KIѐ%]5vw Tj)޼O( 1!bREy0G}ΤN:w}޴F2 PQjjhD?vjuz|RgץFJaf>Bvކؖy~ +yީMYx̤Nq>;M#@t#8 N(]Ow=Wca.eحCT/)O=2Vv,0O`tȅB-0^hpnƱP g=E8 ᥊a$ cQ5Ob&]W&UDm@7\.NVԑjMo]y>Ok7zw!#9<`xǪoY*lIZAR-?ֳ0@Hsm,Ck^DY>=zl|.`LJ.~n{u8>\|#zy"lD=<l 误1F ^ly1z} Ō;ppg!>#~]vhR %vollKq:FY{oV;(yC]~@|eUؠکz-`Pz00ā' PyM^A!_#m|}ڼDbhgw$nRlpD+8\՚5 j #< ðc%zE?+S"JT6-iG 2n` BQamϪ)Dpjš!h۷IԽ|$^l9Z2  /f1̈OkKGǣmGf5JM@W:ס ێ ؁Vno)4ct[Tk2.ԂniGש4^LHJƠl|;=f,5x~UhWyt B4Ld 5 &;8K&pS?oAT:vt ݵClѶ];+|Rnb,3=Za9s[^YMk&_d.*-"Hݎp`t\JSq.q~R @LXǜLUI)V~/A"@9"~ 1">82`X4,G"Lo "ĺhA1UѮ;OzRO3u c/W2r FY)o|Zʒڧr.*E @BG 4+p5c(W׮wS~tpZ^R6a7 DLE!3Rpߎ7zm9BZt8 HĨ4{˫M6"mLI_h>FJغdkZ~3=iQ4S[Wª,t\CtvU# /]&ݰ;v̘IR?A0q/S ];޷u'+r zWap IH;A#݅$! IBRO>)8ƒ(¤R@JOR)%R%(E'(@aJaWdx|BT@(U` ` U J[ )AX, ``lRJ7 7}5򤓛;d}mT LJ*ET`/?@sZ^X(@ oߵHn]%THPOsma q,IŮoH"0I,%gw_ˌ]O}a}š (r!> E|Ox`3f_1$1E>n#XR htTTk֒ C@8`b`,(2P($P TQ K}-[P,~4.sD4Wy@z%:Owπs"QT(H$HE ńĉ@y7 5Eb',[`* `c[,QP9mb,[/Nt% dAGNQ;%zU_6_J p( D"ŋq.Uʸ `]XyC;צ oK-m.]$vCIGDü"'pq6o3L.|oCV'.rܹz\` .uUrs֙ 踃(.诚=9.ŋd,[,Xz^zE( KD[,[;i o@406򪁦 9HZnX-p^z0DB\ ,Ő]rX"|Oxv5BބLɾ,oE̽9:_}O<c*&q dH 5Q{eV/Jp@5[n]g r#XtWQ{S7gFBAr[-cQn[^r\\-qqrܷ-rz W[>FAE@ WLb4٭CZ`\AG,n"bZm -.-˾x@c FHo2%.e.5waĻ F5+旄^d`l QdvXƻKd4B n]y 5.f&6@ٷHt3vqIuI)e2 %MHSHϓV+`]" cD+5"%X.!C9/..4Lb&UIyĹ%븭Q79<{u]޷! ĸĸA-TbU]5],^Yc1oD('&Q˅fnŸ2 ܷ\q1qD2%06z.s :.X+e7^/14@Qg>tGAt3&06 &6 *"I<,.bi1 Kۓ ZF6C&MYKoDR8Q@gDLFdH(n,/TCXAI*B hd3*ǔ.hKBÈmMmq?(T 8!aeWh ]A}~٦c^,lg2XxMA]ج-0K [p8 bRXE-  H)$T[˅%_teI<3 ?Сגvc  jK lNAp,]-&GHSqXymV M*P%̄70yv#H+k*{BA#l?ygYj:Fxj9b5~ڷK$升6gf#%>;޿ np+܋@$|i1]!p@3Ie:ǭW{PEP, n,x2:Pñp~GB&K{Rw롒rEN,R-J$^z{ԞjJ">JJSjBIɤu_/ٰ97~OA;IxewkZDݬ寺 C:_"w *lZ[N]l,ެ"ŋi{Ųŋí@jx]?>7p= >hB/// eĺKKe (`,Xn,X+,X. vW["R(oWz[xV X2\hEDX,XpXb$  +e.`.D .h3|9DDWG;ekEMȥdFE%ĈK$Y@}fNJuMfc^~BG6:!ÔkgiPU /.$QƳIe*ȈX4Ȳ &g3Sƙ_[Y^rq4w5R q̳,h1^PȭOcSHpɗBA8BJ0lI*p.K9$o;׋ᐾBnx-/MhUAxo.ЌaH4D)fQ (H2O0ZIs FYO3)~<rJr߄lJl5&QOϷ3 ܵw (Ex cZ@̃eT@~;IEC?l+[{= `j yOAFWGP h,>)  P_yƕhLWwT=ʳCl (/7/P@;PZ`W% Y?# ^K߾8doLoiuǕ=S2:JKR&7f(. 3$<5z+OɲnK{g'A㡘O5 sB֡)(C,}Ar]8(0?wDiDmy8A(wǏ&#ɑžmϩΧTx3/r-#-OƨƔ{ru"Fq$\@GTl-B$B>s1-0d yuxHhWxKIUFO7(00:f\QhD(,_C+нtwꠀl)LP6I]ϡIAQ!)OHk!l%0M*I!"["XH |zVG2~* 6@exߵUd_ _ o>7t٦I?ൠ&X]gx9 twjҍjIMV1lel^ ĬJh&2ɤZ@n.ĘCg s|Fr:x/59KEW,L PCC^,-)QU5Er#y r^ h; f' ,Eo=vedj?\"[^;!!̐BW@pvb/ . 
+"Sܧp;nJC~v ~ԃ@_ޝI.ó_ܴp>o0@˺fs$XCaodH-, 2_ zw>}rPvHelm1OJtd v5^rfq7.m,74yBaS*\}>_\mw[-/M*?J:K2*tR"/ubT<܆K{x_rO$pS:3V\E?o)#%ae@J[Lv]Z"{TiV,qQ% *0$yqh75G!H>k<^S3b Fٻ SSnV3Θ EYIr_`4W6P!av!'AIp=HyW^K1$ , C;1 laU)m@P6hh\WV}z.6 [e|Ej48Dݶ.| N4D|b}P) #(7'LfO-g̋lov*hB-|);T/lJSdB[+<$ 2Σ=zoWhw'Jb-k=+MYHaBNy*yv gV)EHOOؔb \5tZ Bo FD?D@D`h q!Vԋo _lp=tGVR睡Jɔ7u~w2y6 2|O6WaZ {UAv^pʼ jBB+o1O/V^ _5$$x(30-tꞄ5)ذxRnN>Z$YS_L]+bk`.2)karl{ bAiO1 p95_G-!h|/30L7(2A_;wӋq kl 4#4 l2 ~;Ki[[/ܫ>|Uf aǓdG3WRv7rsZn'?& nr;=Ig.tu|Za!˫jCrLj(6KtnOa1qTF$Ő!wHCU 0 f3l/o^(Q/Qt}6AebC̟̀]ξ \렺K.bޱo^//-^0Xl.uN+o\bnpZXll,["h-`eo[^DY GՈw'eI Mh[[6N8|/}Cq;/4|ᡰmmvzp]q9/p3hGmײ$$IĞ{xWRA=yd0!E "}hn ί8/X/4bXrl wdix_]rn/kܸYis K@6]/M񤈁Ӂa/RӵkZڗ]qu]q]ڋ *VnxxxuZֵ ֖.إ ^"./_]]7Š</-:Kup/6 ,X,' @| ]U۞G>:풠pt_|&g6vBO37 Eu:o7~#ɨQͼ>e&qyM)@4㛳踅4;QpݻleP̍{aj&FF6硠Ђ>89[ZfQǔ/n"eaLUcaRh,Z.s=.˵C~,D@W-/X}ŲUP"(D R,}RA#|0k._o~I^X ~[(P;*qqz8&z0 --ȁw1/5 S+}InVB"EX m xÿw/l|< tSY@V.!@]Catlq:e/d\zX,@\+)o_\H p""D vD[A`.\z@2(, VT^|S "$DZ2er[%)"0JHui8.?"szȱk֬Y.~BejXvɮ5@DLܾ6'DDεriKx:V.E]\pHDbŋ ,p `"XU̴(\X[-P1Wbt\Hk*С"P"fX @ 4e(h %Z\HΠ\4,XKżйV\K/ BZ[,DR,3 XlSYYs5$??{5n/F=q& z?!r`i^s~]I0OTGPخ"7lOk?dڜ% D_~#{yINMtMTIC `Ȅ./$,U~{B?{~YDRʰj׶rF3ZM}y'Uw5a;صzB͇uYͺ4tQTtI`` zFS_G•1u'maNγDET,o]zG{+C-U0|UWV9>tѺS'k?+"P:⛇ AvbA߫KߺݫUK0DƇqˮ+wzʆDg$DOќH#?g`Y/1ٴK`ּT*Dz;m5oH0@ E(h*@Op9u] 龝~g 躏+»*#:x!(+'?&4__n]EAi MJF</;l?SyBQY O*!-`5kt sq&N\0f]V&mOB<.0S҅fT@v#S,DQ+x{) OЕTl:rN9T?ypl_A(Ewe|8=;gҡXV[gO/or: "&ni$lJ[@2 9'tR"Yfׯ R?kbLLF?P=!+"/:gWå6?}ken,H02#UP"(w˛/Wmx#,X5W$F־?)թ>Q݆/ Rq>j;&$_F?=WK{|PX*%Y*r<3S˔Ƕc#GB @DDdP]$x_[g+MQEb'q0AhP>/lP$~ 7=G_ F#1[ J8~쳰(!(L]oʒc MW[2spu[~#ۮF6(]$i=dEOB NÜ^fP[Y=nJ  %HdZfMDq ]pkWYw c-Lg.5L]3Co,#ޕ(ܘB*)hW8+T 0؅ɖ)t~-F¸ &c"1JA_,~eX[x^A8;Za>C ,WFFUVoװGز੢7d]˒٤g.c8&N2ۍz?,c~\8ug_/׹$֜c?hc =X+y>dPDP@ nZH<[e\ˮ?6OG`"\lWrұ#&KE… <O}wn DN}Ҽ~xޞt_ưX--- KBň X1b+X,X.!ihX,XbłƷX pZ,[-[ .C:@0[Wx rn^cBV}^oUc\aJEq@4iQ)i()N6mAa"tCť]hgϗ6zPoigY [ <3!T .t"X5ɺ.XQtY7,,,ŋ,Dbŋ DX̀c@[ KKB DZZ"бb$(Rł!E֢e>2޸h!Z;CˮE dAdAdA("(Rd\-;Jv6@-/ޭ"k\KkM`X,XD\@d@{U+r5iqޱ-]tTŀ etʹ`^Ÿw抚$E< BSqtfȟUBv?MQ(c\qC&l i  夅q˺f߭; ֗ôͯImJB|_s4#e J+`+Bj8IyB_|3MJ@Bs_I+jޮEGY~;J=.;iZLxġ! #fx1 ʍ[\x kG8FVY!W&9is^q?AfO\V ) }#+m%k>P:;_ $>&BVKL&wrnC. ȐA`j?h"51~2s D˦7no'fw2EQ{|n? 
FF|]19H@gad?~cuʔvm^>ԽC.W8]:Қ˺|H,oBj@ iPs4?J)>O|gwna.aC3YƩyvO+L5DB0!v<)5C_D֘|NM?{MAc6oԐ0-C\ >nWF=+i|4]컭,9Tŭl}"ՠ}A@J3z'r/R -͓[]pSp$V|I` f@e~q{~7ڑJR[ւ,?뇦>bdcFa `+>ᕐA?ϒ:/v^ ` N!|L ECʰ:灲+ jruw|(«󕬜P#Z:V*3v?\$aĮBݶ˶ (8(5pHک4/휑G69ԑb`1d<=\pO#R}&ַAA1]O t>^A]_]PI\6 DD&"Yap:[nd%ckBg9݆A}HS(Q@kn;_aKdóA45}מw*MT@XE=3TiulWg^;F1Wovy;1—ƞ~9#^&*$<7/]JwpfؠhUpb~@QԷD%PoQhB"/ŻU4ujA@i( oɦ>K @7^m~1-mBj/Wlz(!/O1tK Xy~G(n'v@+bРqSxh#yǚ󛾊m /^VsfXe'Lzmd>!Qyr*% zWWL`j Uۋy^UrO&+Jȏ͒Uw6Ufm~$k."] G-?ZeOM 3g3HBC{uh%1OP-G DT]ϔk#"≸;2s^t+BokrLBڹy'5[ ?3h|AԲҠyX<7<6RQ cۿ )uE~:cgjPlgM,%j Cߏ >Qb"( H(2t/<{*.k[!퐹㡢+&G;Y(@d@ >wsMaB/ib95p}5X VDćEKe:T<Oi"o"}0>aU׾xC>Zݮ.|CVٱMM!y [ˎ "Mpϓ)3&Ծ/ʼns 9W"[ :Kˉl.u@m_(͒I[+w2>"_Qϑ;qX$u;`]qPί~jpvsv[U46[2 T!$ߌ;ԣA{j3rX?s_o]kb 0m?I$ H.v":-hP>(+T';X7)C.A$.]u2Cb-Zm8,s gXt +v.l ̔ w>Nj< kp~A؅b8NKM=pj/|OY k4Ӌ@/R1fsc3rRb os%JR)\VWG؂ف9g82'ybH8NoC\V|M~Dڧ6_UO6>ayknN/˛ѹ;}/9w*&xRıBqTkTYJ!tP.[rQkPDK D(7@KI`uOoE4m}&Ouyݸ>yII(%I$IukE{]DDT"^AA,ZXD` @ ,oduekc{bA P=O|W-ܺȦs)C]זЋmju?ǍSu]+9  -n1d5͢94Jן[TD+6Qae njW@\b2h`&= ՗ lfƖ-슯ʹG[˼y5")etnDوZU` 5ET-nu=C?}X;#3翋Oֿ3= 5$eӠZJ,/~PÔ~[oJɢ SF)JRɀE< \H)RJHmz<>eg"D|111&-PGndpfֵ{>_߰4c1 \>YsFsLCy x ؗP+O_(%>R{ˡF8 @Z R"ҵfⱅ|g HfUaD{3;ߴuDN}]sCנS7N[ JXi#GSU:ֹ}wV":=A17:=U׹tifgH85;a_I/`%ȤKl$vNԩΪ{GyX}&=+oP15rWAIb卉EAh;og)º!@7# j]2"yr.-#JܣL&(P,O&XyۿC?oĿm\Pjd!.Aaf|W Ol7~LȱɌp3ᱧx??v&P}c4qHr 7Gu{] !R~/3=ɩØ<[ved8s!,Dw]=(8w^@+/#Pa> 4^mS,#64C JxYYMs @OJ y`(zMCXE}\997ƨR#n~:s-xE_ d&HSS쥰P{c9iI|L3K-ZN~eunpO2I.v7W~f F liD{;SdV+pY//j!K*K4-Nl~^"{]"T9-PPg+i֓exm6;}:"yADgu]W"0 ~mtʲ檟E(~Q!Eh bzttPȂCPP( C)J!^ , ݿFVl$;>4;\_Đsn:يdqY cdN,iGG4Swd<[ ̢BwC4E&RPzH-* #> ;jZvL ;wͷ,s?ViB漴C'Qo3q}ޱC.YZwJՆ5~E7dnI{46/u[KC[fNgxF[u7뷯3$ξW1G% s\p$d{% +ds͑$S7D+ߏ p.cy\ZejRtq8G0"fb1CP0~8BC 3I AI\"Ӭ1l"~ak ?uxynɈE6\/&x}-![9sM LwxY޵2g "wh"k]l2diƾ{Iv62o\6ʿk?Ƣ]5,Q _1.8Oro6\ ƲgF;D} =IKT]RĈ1cܰ Sn;`ǏY޼y괲 o~3W] >\x}<pLf; " 0cSXJ0yxŸ"@kw$J ܾ.=?ڳD|?^yj~NQ|qa͸rko.pA^W Ș3܁xęv[ AX*ټH<ΟLQKʀAv` X4aͳS0;kAwL Y+l9-ʉOgxEݚBwA]2]2uR%b,YFZ#2{\Ny:N:Cդe ,hٕ:AG,HmN uS#e4L4rMfđiGCLwYv`5ݒ>ڷOKr B@P(40|7ma.\d 8!1I] NqhyL5cEUaz[*iщ]>q54#[h2 #i+LjsSn`qk4GA,{9ۈuۘOCnov>[uUAI*(r/jE1xT517ΘPhx,WVʪBRiSi&\jg Y(/FEjQk~8BE_% ѭCn -iAP"=_%1݉a}E#3#UX2uYmdtʼ"=}tO~LO謹awN`"~DD<-"j)ߊxx^;D@wݤe/6Uvi{4Wa&,&;G#y9$> "S[yi4 ė~#1W. n:Q:y5HP( ZMwAa!,sڟ@B ॱA,z½￵EuEft}vOnN,-`G%2dϼnkmN7n10 ~UJϸsI`rX]F#r^iUPrE);;*Rɤn*z/iH;>cIUtH/LW*9&XK<,+ͷTxR)hnyn 5|b1iZlɓehޡ^E1dz?}ʯROC 5B"@agQA&”f/ȈBc`r[$bfLxؚ~ W73X\Vt0)!kr^K+*4l؎+o ? Agoly!͌]dNo&!qG=&3(_'n2"Wrwc]ˢ],JHr(L܅*Oo)ۑKV?}taL4 |a|fߠ.d/ґ/W?'N8)O,"LR js^p@W+VSC WN"q>SVփMpիl[@g%ejhA v`=k`ya=ы!㋫RjxLkSa ͳVG=J4OT3 }=k /l6էn`JpaG~qE͉F]]2ɮy?tMyԵ+Ս#7X_]=Z"8;EB?` / CyC4/L >"3ag^>vQM -*/Db|G}/]U5]з:Ksx/gvv׵߭ZESY noy_>3lI'~sUԠzCΰㄶ/W5~g]3[>Bo!ADy]^*7 ,6G`@`(OPdb< 7@F $fp+Fpɵ?g՞s(x JG473=/_x{ga-:,Z9|vu-]RMs*WxH@ R,S.6Yo&{ɎƌuZۨGRt )u2_:V;=6nGM{I0d716Bʋ7>IRݍn'mRxxK_wyĝΏj:b hq#KGXwgȚ"Ȉ <,nEn_R(Hdn`Kڇնr]X yH@!2pj5݈%Ѿ_ =~'el b-Hz?PdE (~ԿG1V`LoɟO>S=]~Z^V8fɴ*!ҝǾ-뱍bUuC($^3.{o&>u oS|- ~ܭ\@^[{i9oUwX;6xG05eA'|`"$)Y+'~s&./Vqlo,X.5_J @/,׫,^/.!l(b,".8X Kr. ^]q`Ȣol03,؀q`hV(Z\kebb Kŋao]ĴbXb/Z[eiq.o X.!M.ܶet[eb&--,Xbŋ,XP5Pkȸ".Eis.zuRW b˗.˰XŀEr.5Ⱥ q V*ʹVPlP6*W*b ʮar%kT2[.5ȶXs rܸ\.5r.Qh\Kn\2\0ǒbWPXb*A`)ʺ+*(f:"["dXH Hto[ Acvw벻,_Pk{ DGRxxl > q* r ȭ"^[[Mܤ11$oKxP0ޱz`:^q`$(2}y _<|*x's5- сiSeo#8W@ͳ{Hn1vKokT z"(E! 
Z#]s`zxxDcDzs{2z@&uq܉ y;oJ|Nsw {z몿*]eĆPΞ7c`3f+4إA__W& <$IT;o7Q}_~~/i/M$eF(Sʢy/[Y>/ EA%","X=n:2Yq?|+~|4ܘ)mR3v>Q4!Gx,> [ ,??|-$ҿUcFfb?ݧoZn@ 9i lѼÒx9e3\7|m&W˃*gA{hkO2ͯ¥&D FDr@؊ƘC#InjfF[N[F?qv8ntS*qM/uAZ $ '؀eŀ+*hX "KU Qswz|{p|h3YR]rx3(/'/K[K¥9UHJV Ra?~[(tu@%ǯ9VYWϣw˕J̖A~஡M6!eƺˣ?[K>^h픒I/* O>]  z)Ta(qI3Yʗ) / [/{1e:ڟ3gơȡjAY@akr0z0Q+6#i#j L(b 3eǏ,:XnQO\[ZH:CuQ̕JtyI}_$G4j"hq͘C.Q=n -.7z0h~:gaRmnoD'Xۮ9gYinjl 'V$Ehy.?c}*ETb&r./d{'97Q!Ncz;sw#~R`$ŀeŲŰ- `)O_M{ ULȖ̹"/2.5Uq !i!սzUqqp: PIղoR}}}N~Sc 8L0t u'YUhAFLfpeOxrZ:<]C_ `F"Q0x˝A{T`^?v#}t=_mdZn\P~wyQd|Gķ$8^ ,Bf;}1 }*7:$g`+ڲ^454D_JI̱0z>o;Ծʼۅ~44ldJ H.W a>rqH1߳ =}zFd(!^@8I#lt׫ zU'"7},F{F~ 6_|Cd5Л. #b,mz=umٷ|ߢe Itg˕XcD,0sNs-WӤxᢠK7̬f2`zܓ2.p;iO؊3Yw!1 i eh(0~ 5)F9({s4D17,;BeC@ybR 5Ҷ-:ǖ+TAagwZx$3+dd#Zoȇ l \K{f{/xV_ ꥜pd( :h30/`Am3Cbyդ+4cנΟ07x?N^P*hS5v>Qu(t^Z""j&݄R0 gWY{4x1m-95GHҙXRpM1l'My>in4 18{q%^XLErdbNbP'ŭ|]"u\n=M,tB}Mch@vTMpjؿjUs l:Svh:M`vmVVԡ<@XJo<YTepַ3RV)5H4d1Y,x *Ni`K2T!~:_y//u_LzQ[{ۋSDYp ڃ >9@^X]85;OL-,ە >W5>1BYi-ʜNۺ; uȲT6YOj^w(")koAAbb^N#4A1LXV,Xx8!G:*S}۩+/?~ra^$H3JMO[In'~t"잊Mկrvz4?&6g:=|xƂx挲L-l}3Y0{Nzu_<͂Os/vQǩbGx5)~gNCp9~B [oiI}a~;)Zb_:}K_\VɧPs1?9ȓ@S}ULU׶ Zgڋ57ϱ™ ;ռso4TKU7 _Mt#'91?d&؏N0EWe -5%Zb>=|6jt(W;]+䰡҄noQ{d +yŋ4E2FX3`m?CJyDq2O|]@֗ p뭣ފuŠQP," ~P҆D0,9) uYױ: ĸ7KzQd(װf xWc718M{3599 oRGܔoi:܈iopEo4^RyKGF4%7IxMvWF@C.9y $ҶOye{oi| +=WAƎˏ,&d(֕eE71~;-;c)lv9!)ٓ,m ~jtѷˬx3?gꦼWN Pm ^R9󞧾's C@I춣޸*vpum5w+ ,L}`W.Zl2hj e[C+TD/4Z%jZ4ߺݶ[kDE+Y*[_Ak N?~6~ Ю2-NbY*16`xZ(eP14t۪[`#oe݇gw/Ld#|INE+RMapmb=P ͧ7_Nw QDxhܞWm̳ pz` Y9@& *St E#)ǰ0-H/(+"}{q,G( Z.Aэ,a;^[?|hi ^{Č*+Zi^H<~*RzեFsgm>uz[#5 wDˇ^~ gIW62l'0znu%U}MѺBЊWxެ`oy1b]s#|vo>֥VD^Fʣp[JCU[&L̖*&|]d_"=Nj-SIY;I+C]El}:Y,RZ\}TԷ[hs;~Y)Gs wasAMD_`#~ |MJ_;5 i{2 ĨGBL"q^E.᧤[4~'T_s=.ؾV~bG;#= 0˃j _޾l Hh-aEN["V0$JYS6IwB,"x^ϼ'zq=3a%?^~3D\ܛO#AnWPZ`/+Ȉxg]/^[JX }Oi궟r">Zv8~쌙uuL#-~x-|ŪK*Xī1Y@8x3,B&m'+R)[8pi7%Az5wΡtN*!5_s-v|V}2Z%5ݤ5H^yX_.H/>kzHC&L gyʋq. ׍aFR1-כ-Mm7M 5_Wn:Jj& +\k|}ƌ6s皧8*D Ⱦ@A~ƣ!m\:[Ů:r̼L;ʖ^'[x8=B:LO2q^pիn>X$Oݭ~/RE8\ly\m߾ sJWdKwn>zt띋*5W0Z-%|k4v#|㱦Q:op0fz-7e_twW6V7oo`iLXTgΙ+,j#ZXrߕ#'DGMOPf>}j슊v֛ɸU|lűTdqPMw3 Y91y8~fVd|Sns1ݦGߑ䞶( 2|9S9z3FZLX)qw0=WĕDl,XȝJ8$5yBe# {37RkaЕ`A Kdzh?M1XjYz_4P*EbObW`a荹OtbOy*=c4FwY -2$J= *E°9&Kx")\sfR}?3P?X|:+>,9L2> <6sf {uyG;o2W"=5w4w _ 1q=w=V/}4 _Ξ&  F kNjNNRyWAPX1"L0e£P\?4=kV؄w *]Abgz^ɑ)Z'd[U!زy٪E7 앮tk= wiZg:O*F3}>S Cr*(co|>y,AV;Q\(;Vxm?jS 8h*IJq^(L+ёxri,g:`EQ/8=( qy`l 컦II:!+;DnWQxUι7Ƴ?"GUon1-[ ~%q\+M;O~lѧ3Z{]f:6||<{, +/KvKq=uvBg\)ܒMy SS}*;ALVtҵDsu:Z!aǽ!\ As$Mjc3*aZF2,6^PUe]r0x{ܭY&q=֓ Nh鯺}m@vSd]*۠ke¬VΌvTdczJ3`J3&#UkGDžEP;.>J&}]<E]xM֋v Jx'o}+hUW_j=Ǘ;%}#3ߦCcSkpcGmB068ǪڞOn]r4i^L>SZwke%vs!+gَ{ڬh1\roGpкN 4A'}p[eF@t7M6]]| &OJ滣j*$hJISsgےaEZҨW5L eb "<+h iz%<~}$SjGF *2Y>?*׾kY)Dž?v;6.TctpQ^MՅΒMѸ XQ@{VEeHA bD4BYw mA> ?JT ;Q]+N%K7N+lySL-Keؓ66Խ%&G;EXAP~[4q5a8=ˮU/*,*P"3,~%PZw|_sGŢ 2(„ o?1|i#M#d5\y)scq*^?ݔB0F_F~qm1#C^pAL'ۇb4!t)xe,>0# n5-0O2׫xz. L][*9~ɷ-zjOqȺyꝊ_lO7\י=Wgsܾ[iy: RsYP=sk:| kjǰBp:$"QfE,hmE:s?osTհ1E=3uDwmi)g15Ws54+ݱ{yYtHɵ}xpIe5fN^+8" vy9rZiq4I }iXio:z)tcw[\NFyEO> qR& ~r,Gwߟe)&t|]m;p]pI<^a1)Z?= ?]&}L1cT'#,)ףHA6mmKMHQBf?]vd>c?m}oę'cONk#}=8t+.-|䦚5z߅ۘE|B=cHlx9ȥ]|,2yMꠝwàY Bsoh[եd܉w6=D~QEK889-gNUx{"'͔N]h@CG1Oi+mjP*$ r~#ZEKOzwOtJd#{d3nՂ}e!8bB蓭RpZt+0ZNFf|Vӟtu{F^EwJMcuOIL҆\>Pr;Yiv'暁KLS{MN&뒦|F*a؇6¹6=.ck~3QWG >>jnnA ڞώANX0~9e >\ h ;ll/QI<5S_HC-A3D$z\]1魂h*1rlrTgK"V?~*O*b=2VRQ@O)MP+rMn<$ uP(QKI~Kd ? 
~ek4wԑ:{`ٴIH֬9!+#Rka: zϩ/m˫Nѣ-tǪ1KNx9B R;x"Y^cUh+>֝3E  1Pa-5iAbaB"bh3q򬞤!;r\F *~L2&^KdNy߷Gl){E륱`ti~؎1|řÍ/:8ժW*U}f F%52@p{B('ID;_M[ǍsBpwYMV}׉͵i݂MӮD/-.yeiT+$͗szYyo 9ݏź_J (hHw7}ߟwU>.k^}-r%3*<|d qmˉk@戍{iߦ:#*$s*9 ץٹb L'>w0NHU{E1\>LqII?Aq.d얛M!''@3n%ɲҥF+.D6M{@);Mpy^b:t1 =Mr !xɃUZ+{1mSc L"DM;<+UD ,v@%V6,w`4.fv-+[g:cF=rDZaېeIT}ofsIeeΘPL#vP*U YDpAb9KplNᐛtf>(YSVrgbW"2=4ʤe1=: yH4 +qK1j8zw 5|JGp)Iy.<j!6Vߜ2(DFEBZ3W[Z;yaV&itkoݰ&o,y,Cy4Z8n-"-;hRe~iТB8Q7i)q뗼WZw50Z\be?s2}f ӞS L7vz=Y|w/9e$BFD,:z cwRpm%}:53yXșb+:_7Enҍ^ #՚^<9{HĪ]+6Z_ :<f]wٯ1HwL-So,4˻L0 Y^-t[sšjnc9ՈRSx[m%sY;)[׺9G}x!QBMTkV \N!WK>|5 {} 1yt_:];@H: }ݓ:KC32T3-NcؑZPMBe+H\l+F*qoWTds|Z^ _Uc I+LJS =>ǎo9o_꿷rH@$2HȲ%@Q@}{V?{?}4k:y5p&|^pp-H^q'g2^',E dbPht]\_g5nHW;w Dc{S]4H`NÆ?E,dUC10Z7S{Ucv;`q @|1Ƨ֦+-:KXͳqcCۜv|W_JMi;nDMDLɐ@B2ݮ_-D;fr#06LsZy]T4?ZEG'6>c$?fE-۷1U`P.KVSJ FdD ]=Zn6g>9ud0AAs 6}@.(([]) T~{+cW1ڿdK?6w "uYJR0\p2?_W%`v#gppQ⛟F2 &m@uu#Š3AJz_dz4y>>dt_$ҦoF(d\s2| ~\԰fkXU\{Pqg4dj~j^L!cCU,ߡ6N jP}V% V?)e]){)CS "b7uha2~K]F(u =H~ZݎTN:UJD"}1.UBB($@|I!?awS]nSH ~~x8 IC'vԩytY53;׺t5*m!C &vZe~7y@ɢPmRUH 9Ў oэcQr` \f%gkxG[[9'+ _z:>ŭtS`77'>(v8G}'^=ꨪĦDbhޅk M9(u=R+~Y+g"+5[Wo1PTqvv!{Q 7}ץ~}_ug~*EC?psz #03wWAG_$c sgtRsm>'SvPsg\r@Kc[#m?}͡(삃t܏q]R}"F@DEy `o{4|9?6wsuMRW>N' !B:$;$oLb۪N8toEX< JIWd_(yow?:>U"5 R":yT{oK~R̔>ɵ礉C(_kO<ĝgiZԌݸ"zsx{|X=|`\Z+$>/c3m3tk޺A䔂O}0)a'"fn'=A(hpw^Rn}CǣZS6;S[_ 4)c5?Sl)m& PE8jz{VK0 @`mG9lVr5]%\ؚJ^ \#L| z3c ʴ|~VyU]@w{NK [=Oǜ|b ܪ  %.\B6WFo_틂y i2EsZh'ī֑vOv@ʬs9Up)p-C t;\7վ@Z҇h^'ȞEsW?#}(\(fx#:9k!C3ԍ ]7!/%]6LZCۭѐLbŒaJ$O{֭;A~ A\>?K f &nkorBi*iZHI'3w[65Fp9`t3YL !=$!Lgvٳ?s3 oF; Aq{@H#$B`-ʭ`/3BKɇzy3AgC}_Ou6jO: ;)mg8SȊyIڙ4$ }T)ln>_0vhU^EV3c5y3#ƭQL0WZql:Ŗ ܣ Qd=2ڡŞshDKIʅeYmtʋMl O|d[D.8X^ؼaܢS9|@_7{tq[kFbPPQdW` \tu:@d=XTh X MA00jU $B{-4."Ml$ْx8l,ؼن%츪4C\^|<حyGKlžB8qi:eT9_v}s$VE{=M*{H:?oatVf) ;[t[O''6+R-kThg=TZGy.AkElg`~8 m&R˚ i'fW0`XqW\5b0k %l`ڶw `5ܮ-5EM߁s IJdU#h,[24ڇvQda$gQ :E}"Ӊ$IZ}p^g:PY-W6z8>` U RfAɃ[+.I7'^ ?o A-'dsv&Ę[-ٖÐQAf/i b SPWPzL0p@"vZ:1.u>Ux56}r-{~H{B77 }.W@3zUЄk ηeet".f% Z4cDA`, 0oZ]DD̫ 疮M0㽇K$.Vx Icsh8rfv6~>ګܯu/)R5ȣwa keej{s:UV-IMtWP[8<7xH->fŁT8MB«K3Y5Afy>yW6:9{As:-Y}Jgդ֦ssvc!G$᪍U5(- ,}Ph* Nȕ+xvIFώ+dbS*qZN^O6oe8᥾{'n&4Sݓ]j#vY}4'=vA"ni0Evpf C r1Wg!]X[WXiS>p~.#AemB+z9}Yѹs# rYD/uI$T~w1y5mg_(GcoaZlp,I zְBy]괏]o!A ƲRl4BF#yT?9[?KtR@! +wX9[z,JE gJ[g+?+;B"zgd(Uh.aPB9]z4w _OK48yX0~a@>C  ,|o۾;T9 "AbRd]Ͽ=GeRi mIU3dXuQ(u8CDKF|b)JjqB7_EpFBlN$|Za pз%kzޚd7 Pb3(q*R9j^U&.Gѽ*WLuI`i.;L a7پu^Whꢙr=/RzqbeAN 9!oM U4#(Nv ʷrs2.+˲F \ǩ ld&L.rmyo)iM+2z+;ZLq(:h9;It-8:0dpBfsËvW9 `5ߐs lQy;n!|-%~W񹣋gY xbV8޿Goas^]P !<`sGgX dKNʘc8y`^Kq,d$ZZZ&Rizco5W}[ȩFDd[Et$YP~:C]yϴh4vڙN0bcz> mF~`4 !"%mq6`V89^ I-qM,-yOT ~;@Bd[>i!21D p 1 F ,Xl0@Q'*s6 D@u Nጕ\Tj2՘xsspitneX" XjRI5ID,BGLhhbv+SyPPpQ?0Q9Ds䪝l93!;>@^ͷ?{ }ٝ}X^vVnf4籲)iP/t4s찉jm~1zwD㿡el>J6q ~zQD_JmOFskB5=y_Fo^7:wH:QRp O8Bjd(-%B \s*ObR2BIvVo *u/X:XSQ, tօfg᧗'eu‚S['Pu@x6W|04X,-:D~GE؏sOsIOiT5 #6#"Y5$o{c0M oMX:P&5 q^>vFV3 @S^Wotm sOI;N1ϪHHNo[VIaa0qyDݥM q'xvƵn~֊h),H3olFI]38U0;`e5p6=b ӬNǖhFKJK4ɍno8:zzKo;DzRmx@k `/Ϭ2wr_Owvُa T ЕD2| c  O&=[j|tyvk]W˝]EK݌J`)/MS\Ipjyxv|gjyɾNJsZ97.֟aj|k6lV$o`x}P4P/:"|L_ԝYp/*osW7svԙ<D†zz=nO$u)I)@_" z͐fJWj#aGX1pX\s@'"Va'?gf{xt$AUݛBKJ<&;%+T@5L'+.ϞڑL.7Ue)SҦl/)[(akϘvS >Dcf'e' <='ԟs*0oz2 12I9=!`*M^d0^>KZ`^q)< ̟C:skOl ^_Nhrz?C4Q$Ҡ4/~{x6ITۻfRoE0f5D lr2 ̈L&)W$Fo6K̝J<| Zx1VcцD@~F Ik۴6{vD`c_poٴ'ci q;ܻݣ LÚGnOE--9p'` SRo$ ƦՏW7:?[rnG2`̸cMѺ$ K]Rؙҿ61˙.(/;_]`EC_Qs) :,4""gHҹ*s^{'k@Sq d{Х8 8 Kjƀ[.}sKJ;,#Y&mJeش2莴 xM-?[% 3y|]y9 `/^q3Avfk?^뵧|ݾYhd  RGq ƙƿZ:-fޥ`AТv3??g-8H|C\vay5h{oybP!+#[/mT` F𴚃7OTcC~  ! q} v8`D'$8G z0z9O~7e\i~j0kx :li@ qF@{ !$49@eqDNȊHͅ:#jHO@SK;;lqbO&Kk( ! 
o3YN/n;t?1ipuv%-":J<@_uQ=dXJ\햙^Uy*3g *\3_9 hqr.> 0+t\WV5qd_hiևƙ>+d8[Y2\1PZL n wsQd5SVll(bՌ1#!}u;[Uufoۥr"My5hjC}1~) @Kq=q($T2R|^3Ƨóa,/{!\uOe^CD+ i3J۰B Bqj)|J;睁p%Z)s" 릇 G' N7ԟ*"dI.>*Yo(>Xan.}5s)J.O5i@yVx; t Ky GPpeRG#r%cw_w/#{%UZQ-n]N`@BN{@D=ؗZ/[z;=yC4dz>l%Βfmf7Nu蕑]- W:IYz0~NTe?qyi!yLv!?h顏ESP5*{h. iFYt)2%?b^e lN5 _c:Yu{fڳi՗tei,q=Ƌq[<GP!!s3eo`%-ƪEpi"`asWE^1틘>&km˟b #|~n㿐w)%H)%H=^b+U|߶u9#K$`2Z/;ιq1}u=Yi:L{'wB*Q܀wNܻu6[j Dᥓ|`X0axB|Lj Bj,_rܾioolo?ʟ_(s>/Or-oTG6Sv|aYMLc]C?tZq@{nEe<8iӹ߮:c҉,s}]Y)`A~rA\?6)#U{Nmrݳ=xlyk*~p&c=m@AF3.z%h7aѣeR d;UIF*.KU]C: .Jr#zP5ҴvrH|l%̧dgbd1y HR`4UtK#A`-@q& &ão)uFd'A.~EU A>B7:dm$LݻwV@%~ 3m|hKoT3{+ӸqYep,n5SACnn}= %q7RtMȘwuާn!I,/\L)*w ckLVϙ3VJxl5AyQ]~LڻjFu$1;)Yl^PoCmxoԩ&MՇ l ^kҢe'侨|^&bņ\H`n㢦b T6H>әWAK< )4o%Vz,1g*'N\6&ozƹVw8G`g_9,)'1O׻>t*~J&:UG눹5A2i+!fU.=n^(Rhs.J):Ff#!% O&j w) }/#09~z]frxy&8O+}Clp7V'rX(xO ]y]yl[NIPAOMo4:k#lFuP{pu#2wX֠a=jZ5}Dz].ǦC[aOzl(gޱVfՕW:3Zo~m# 8У&>x_(z>R l\3xzn8M#Ty2Ի!vpZ~7~SoiYgbO+ƀoy `]Nc36tHrb]+ ?ҟ an-;4wJK$l?f 1!bMe}Y=^aW"hHdstNjA]0YzT$ IO˞ġH>}n גR(aa UO.qXL27 ~˟ybh[ "Cv&4 TtEk~5Լ_$-m؍51yE"d4DdFD`ȏazF d.0! nKV,O=*  (wN] 2 hxtiWF)ZujHF}cBve,G-]]ދa;y{x;OMBvrIzyP9ۄ|vЁ! ̆oA1a u.Xښ^E Flo$Y "H nD&HW~]NQxY΂RL䂐diE+y'lZ\?'˽|M.Sx8U?x3R8J?t NE$K>*#kݟh[sfUn2E3-w)[=;"+[\iM*R%U0)2Ԡ}bTqnP X<уPfW[D6;i) 4$H_Vܣ,g Xx! DJ4*w[f׈U:5 S) A($ن:W$m@yrr(܃3@ wXOL3MITOA:pfC' F!ݔ?02L r3qc=,[iyUG7 fKaB8Z{2 [! !)Lurג I G˥` yY*1*{ef'Μ}׵߯r2@Vo< ,Mz.d[D:Y^~yzo@O7|/nrW^k^} %=O|HmEu7@+޺ɦ SCWx]탹Z-|^q?mڬOxO|" ܷܷ-7--r p-r܃k_=/3۟X0el]C=1i]9W;CAG@eФ , #^;Y8yZ&>3Y!G!mgf(N;[1CSw_/xNz^XsDQgl3O= dﲏ{7mTKޭ#CX9$zF%mgINǿ\\wח#i"ZHoSx=~gg<=uސC4)j,w!/NKd )(%o(/f3Yz*B 3LܨVtmRb\QAG?Xޡu'{pAI2d"Tة868OU s/Z|uꘇ*WG܅a".r"n_^~ROPb,yʞk֌/=xsC/ts߆2q/Ttݕ/ j[$(tw 9όK8_Nnɼxz?-YBd(?ܚA*j3F+l;\YWGg^]I0ZoDP1b*'f{H&j4h-3y~v6Rkg'b$SIM>s)} YwLϻE,;.̅[*ZvC["A\:8d"n=y&m%@lI:qӛP(tO ޚ)OO K6y <ϕ8C^Φ2{m}sD6> Es2t0uN͖֧,*O`%vwObbbbbbbd2 C!d966666666666666666663AxxF72W4>]ڻ|8…Fp)Ҿ;7%"JK8E=ܮIOǩrħi 9I8<O<ĒHNrأLA[UKloN{C , L9!B5 0 ʖ59a42A-J11Q_JL"W)2{jX.kn"5HDU|^0찴;Ь>ح}6͖26yH6݌I*Ys+  x=k@f1iՇݷICPR?eݶ!c 3i56NU *=f~!řDJvg+.݇%+V7><z \U7I)Ts}_$Js{8zٌ/(`qWW)3QYܾVO|p،|Q.%Wԟ+(Xb$BUȔ䄫/Bվ=l[w#ibd ?p]g1ΐ7 }PBU͘F\4'hM}#6m;ҥdz< MgU*G{ic.G ;J*QeV-.]BcAJVbS.`#LC'3r-ЕW^6xg.8Su¯=qj(qhtEX-~i=HYzzx0tMdQPk3L{Cl9lS2Nrz*ʋ)uݲv;)<&n3c;M_0@Br@@_Xo}ϼ=k;/DUT(9[ _uzz X"@X,鞡uO zLh/- qƱlXYqE \-P Z xebpD)iinZZ\rťzʱrUθ̱s ˙b[ Rh [-5.5n-šis-˙s-˕q.e2\2ܴ,X,[˘ Z2*R\p̴e@1nD̷eη eޱp-Eȷ--g0"b̴ܷ,[p[,X- .ul--,XB`- l,\K0W`bŋ,±X,\ C*[-z.@[--.: K - \B. .[,Xuδeq+ \E q෮p ̶\r+p K2\r.e`*YXb1f! Eʱr[2%/1JR)JR)K2(R)JR)JR(!B!A,Aa3f ` 2(^^ KKKailZZ-e "$R`X [-ťZ[-- alDl!ňAp"Z[bZ[,_[(,H Z4Eh Iq.[lR-PX0  łňƸ\ 2\BEBX P*}'9*|,Z@ eQdD# 1"X"J" `DmuQ}hukpQ}"`b'χVߢAQjgqt,ڈ/tO,@G xׁ?ϟ\?y\tO?g瓀ݏI?1h 2eJ(kSwP:?ǂ ??N_?ÿh?fUTRIA$RwZii`U)b/t2bd_Ar(!\BFl(*)Qe.=BlN1-(#q,^@ u,X X,T`U,[*RQш"^ZB  [p V-l[޷8`E LP,EȉKpn]P.UP(["EʨЉB@@DUn)pX,[ &Uop[-.%ΰSAr- ]%0,ZDNjb L'M4J4MLȸ`SLbQUH\M[j `Jv"`ADOMN".1b(Ab(EƂ)bwx9%:?S$eA"HѡtV $(kqwC xtLhcA ȕWF"rW/ﳀib [ ά]5/X$!boZTp!aloXb$Xa`PKtbbŋ,]CH:  F@pQx ]3L`b Čϒ!Q  "9Q`"@XB(X BD&FO6!O^\5*FȧuэV<3N-q24D:eCoo5j:dtBzL)x{?% b{}We\ R4G )}liocq yպLcSzN yC`|I8M'ډLxS\#7W.KOM4aw0 n[+^趯 쒮[޹nUvrԕb[g&kJ@A6mwegJ(R~xPJP;рwm#&7-:,T׋ik/Tg#֧0~+flhW=mRCI|Y\]2i'F\73c*LW:cdgq17YI [Ii \xmC;Z2M؃kin},ߪYov\VuT[$X w VO?i.%W3[od:0M;Zv,In:඗+t{I!dG\eۭJsH!Bd%C _y/{,C>Ue?d:}jQ{~DG`vވ{sBsYY? Q!d!V%.]Ú2 g6>oIKNS*dDUj.5rh"셤!Lݬ|"@ 'nj`y (sW>hJq^7Hhjz H|`HhzOU7;TDNu|AI2YQ"8W&K%4ȑڣ׈I (*-5!@!pP\,D4W\ PXdEl⡌@̀z-#g(b t(p \^` V`h\|SBFZ۔lQAAJPJlXrQ2zRgrs쪠(McYxEd|qPPdO )!{|a Y̤;\LNֶ*KL?;Q[TR.iT*SQAg?RjLfF'{"P㍒! 
0@ B盓w=T}b4KASAWԜo6;x XHpFdFbBEMZ~*Wz4 OJyZߗdĠ͚jϘodJ!rmIyGü$TNt5bI.OȪp%[B@u֞!д\uW|%q,$,Iq"D`hW^Y~QOrOgւݧ=6 K 1`",X@Q)/D@t_- @( .AĶDIA&ؓW~7HOזZ92CJwʷQ8']9@*y"B^UgizR ͷ;*Terf] 5HŚDt?10S!7¡{qڕM sX pȱO1*];q7{^lR*X}I,.":Z]Vj諦cG`CXPIfr F}W.h+쳦f7VS Vzu^3IP\q]JAYY0I ~Áҿ\#)=?6s^,}ipsx 4 ` } (x#S|^vuj9aVx,]b=OJ0Y=%v0f^9ng9ق*7"^T+@"`7p2o!$=Krl{gX$M}[={P<,ȓjƵ& ^S$81K=F`q SyvT+iGFCStZkRl ,a$]*qH`"AbNL`50deE/Pn\ A ĂБ;-;u*^(dlP,2ŨX(ZK+-\Q04ܗ-[| $2@L냲#bD&*dB- r$EsLb"FÈ ;Ƹ@ϥ [ʢXXRbbbbbbbbb([,Z BDs D`spNhB|:A˷C _͋`VϏȼS35Vk2K=?}9:&K`iTͷ1{d;a'rvꮐšdtJwm8yʲ!']N2nX\N %4ej& fp/ߘ[!@GtyPvýeWC ?_J[j~n>.p@7٭t |\Obfe9Jug(ܦok@%pI d/f]R`6 dn!4!cЪtС?}ut; /[a7^i7q9g;u#@8Oi! Keb64\@ד"gֹp!`, EঊT^Iq$ .$鯓N# <=l~:z-^Go/&;mk< $Gۄ!x<0<<ɍmgW},߷,>5-Q[VBb>fyچp_WퟫBIDIÜR^b}gCBFQuVɂ) iWOއhܝ&7U>%a̡.fOFl *Z8=i+o/B :¢k6z<7Z܅_Է+}AB =  PGlYwT16h0YvK6P5gHPKjYj{_4!d3Ճߩ!o&ZC`uqVMd4k(X=טxocL{߾f*d9HVtX[4<ƃ/{x.+請~w[?eϕ?{c~&,5DJ’h"+a ClV/XTpLn X-n3vsblnѤ#Q"Om'Īя8_DB(xArSLvB(rR@fdDhMiL?9_϶ZT+\z;̃gUcNt3 @F Gwo6-?N )#- $ ň XE`E#EŊX 1b'i(6D(B#:JH$_!lAN%cseA/]R.2(bUJL9c흡qy"0jR"'ĈիU5,e/41"*/^6v0݆g+uѱY9$z-"H%.P @l M1C}A`r `s(}t;fQ3F"JR/"xى/L*"u"y2$\(ĸ׿|aO淜Yh$" "pT3 ]6܍ĺX R))()S)#-h$Z61M@r LJkX!E)EDQEA1qx#,*A{ CϱZY' M/uE&D`_[V3na0e/cEV_p^@@E0*2*.}ˬn [&~a}75AAD1 grT.LNG[ MKIM,JRc1Hٖk $AnE `L"( ahYqWTwuE" Q`ѹo5i1DS1(>/LO@\o'q I;~{_t?pϳݦNDX =DPTTzW<'< RdSИ [wJabA7CuJ Ў7~KchC~3< 6¹vnâd#z/ZVg՝^ޑ7$|Cs5ZKl4>zjgJ(5 Y$4d)k&^߳w_wMg>OƊsjT1u)fUSV6Oı^ /"E:=n?>T O=:$F"O|H}#(;=Ƶ> ۱jVWk{MԤve$9ikLFSZyx[3DKsmJTYW4+נUoqNj 8mnk['u;[!k .NW /q@$ DVA ~CK-Y2?"-"d4#jo:o]aʥ($t!͊t?_rAE`7@k7<}Mi$K,@=M h sy()\,&?Iܧ8ofø8yXjz`;!Wp&qbC{FcTּ,ղF_ϯMT rDÂ:גo$'6^nS'C_yxD>@`-. e "@;=ް'8h{Q`~!eUCcJ>\^"9]~qn e 3TEA#C> ЦE"IjX!Js74!Y~Dr(ȝR-Z j R U?^FŲG4r2]Cesm W2+ӝPߑѥn {(֞e[Ĵ^mtCq&uC"#{m$1Do&A,dB"C=Se~wE:6l,/j/3~5$u3$9C,Ù<}@Iew~J k׬Ej=Y`|Lr.PH{)j#1'Q?6yRswZJUH?iN1@הB "_B!{CSG.x5y{\5Ķ/Tί?0:-'gp0Jkdg:M|"Ŷ} YX%81Tƌk>&hր3`&TY*S5zz*Lz Jr6 󟫍.!T"jd_]^sz8|HO݄: K TڊDK99{Շ)}8UgBՃ1ϭPiw]P8I`[b*![g7٫ y z 8H;d i)$h{xgy'p<63_ TNP*'=/0)]X2.K|Beyo"Om+^0q1tc?h_|OCY wDciGWwzo~ۣo$Uo$`$ْ{B'v}%OzbvOQbH ?:/UKF2?4$LWv{5(\APn `SڡKcFK[ nR(8ȊEC/ _}{I$ΑIeRH}E E*/ ҚPHZ<7,p;N#d=PQxaÆAxy37TXfHL(#J%!$Fԏ`|LAljR$UbP"{T⁢biβ&lݯDF/Q*IBII@ \By͆}L"=O\28cPfi&Cr$EH\X8p 4-\~5H0± CD,R!ANFj4rC|w]1R /e1heEf@BQ"%!$ovJ[9*vale ST Xq.0ˍ!W9bARR &PgyjSrUh OnA$&Ĉn/6a C~Z j2{@yFBmGAm*vdp@UN?xQ¥]XC8Cq˔$! 
% 9BD'oﻸ'y!B !1-m.%¬RCּO1gl9c2Rnޚ;Y~!%+Kq.>ji>8cZn~+KKdm1v_O^8AHpWއK\4Fb@O2qCU ͆|ГzzZQO= Ʋ,ךbA)dl 05('4J;#E0d츱$}ZS s2`'Oc ]4ZfP1](/Bn$;&;i̞S fĵO 3yH\v(Yw  `:BB*Uj*yҞ(AyQ8tM̨)2Խ!(T@Β7MԠ6m J}~( ?!*WD/xVǘ?Ql!@Q‰^l2ka\9{1NǴdyí|gHl漭G4)Ĩ@MZ,,Er~?~uDB 3C{>G߳f9=;7azNW~.DFMuR@VD~oLG_.J{H۷meQƎ -brgG/7*eA{~g7F50TDA2Hi3ru B F1ZCL!a9ߓ{Ğ:?Ga.GxzE{xޝN@ु+j?/X扪x&<dp='%LKn.mIl(vsx" çp4W/E%~LyS t5{MgZLJTؚx$&*,k*raHi$h ~ ^-//}f>=S;0tH}քO(vôlѠ2N_eH֮Fr8[ޮO #rK%<0Ly;~4&SۙIHF b rȃ7Ҫ~CޫvrMbEw4!GYM Bۆ2&y@G,ZϟНȈybA g[ ߿ePZ_Off9L]mi8tɹ-W, .Oe0/e]*6* 7扽k\u#qhSv?␹T/UK"󿛨C"IPe |~v" ^5tv)>Eޚj`ϒflv^nv*բ(sp /=OL$D%:Ern[ al-blbܶ[,X[rܷ-l-@nelXp"\--*Yl-Vn[ )e뗤^JYybK/$,-K.*k;trŋ^-*ulnZC k+4u$MD Mo :ĉɿWpM ̈A*CsM$2A^\x5YQ2#l25R ({Z= r%; qhwYwРU l(1lt@@*@ /6e[qfk Kloan>f匧ӞRn<~%sV0)*޳X;+̉!"] Ғ Gxh4ip3LO/ YFDzUM?0[~^ۏo WKԿwwn>ZayBp A9;i{ό_յKky()^[™ҬӤ`#X |lar<"J/{ofY9o>mVGXjZYs["{RpD!c@} mT@XtD8,@l3/}[UIa9^#x?v-/vWThzC2}Ⱦ&2#/zgcQuݫb={yFw}&A$$ fwUZɏ`fj Rl0{5NJֵ=*ljw]xcQ!ZA{  "KQ^z*|\x:/1R|\Rˁ$4*$'ӨF^T,I$rT"B RVeK>`핁z)R\A`!p"[\{*$S}H#"3H[&!Ͻd1ɛAAUh&I/% REpPiu>[`SP`2$Iy&897 GV;ɺzgn" t|A4W~)_W/9C;M4{2T- dE` H %te (7E%"%,"@(!`* + PDFyo]R$<#A&p#rAYiid(>ȷ7wmfBKEhRn‚8RZU,fb!|"0i(PpX5 SnbJw; kPq;-k{DX -;/Ux)O4AxP V j7|.J I/rXPDm^kB,#h\#X&H)C[a,4"%"Ejx #at H'DDRG!#0դǷRMq@C(H*@`$ ̳g4Hl#C5Ka3PR백 P IE4, -X- -!" NE  BBT(U BPBJ\""Br bh NI.@$Ɣ%2%"MDIZ?ݮaC-$I&鎒^>Pzl"2.UVn=vЊu~RO tԣ{6yfB% 4 Z4#1!*GAB&ެ0Bh n\#&JyIW D"a]Vb/bUVh) tOP{%Aט#5s!˨yrw`˚ M w|νN3.Ltz_;?:Roj2o99Z˰?sck`?-6@u$XwB C;{ ?Űc5-=w3ңY*6ItƳ<ȩ H֊Ds #@4}(sG$M\M_=A4ׯ YʋPhg""^(y1jg!ybˈز?-@Aa%PJFY'IdI.I mBcJȱhEq0c\ kBKn,(Dt+rB.u["u+el)`DiȑR,Z.v@A ZIF@~CZGYb X8LMC /u\wh`Nc"mⶍjy5+oѺVv1Sm! rABDILRnYܯg'EHE<(Y! QMY,@#"M$`0|/( 7\!b(h ] 耼5\ Eb%h/6|]F=WpI DAĠu%[4 !i ˴?8:\ L:8Y:n)gBp1OwE7xP!A<<#2>(oW3P3x6cyM@/`9w˯z*ڴm1<*0f m6oV=yPyd:_l9_&.?ɝ = P/aqrViMM-b#V\L/qJ d \IEd$ȺBeqW∩(?MΑ8szujBAqGJ#f)%Xv=04)qe _6AHBYcXkhEqH1FMh!kOV1P-*) 6=؛Ա$ӒmMQoeLq_ԋI&EI˸]0H>${scoVڤ8\^D;Ph$SKݓ<䁼. /2c]|KzVTߵp3)Arg3z!gD"o%QGN!# .;ͳBW%ih}Wq'9UP)R4M"BY`6dw32KzʃncGf| t^w+)CZq_gS>8]{DO_p^MBTK{xLF5/ : ὠ.S=mFŲBtuл-^m0J5$@tM t, -uzvXh2cȎ2w ϘS?. dXEIхDAX"ϊ5crkBH>;54׫X6+8_>U 0AηӃ:Ja;@M}q _?V!< "yJA=`|tĵ|qفE((! ]"\* /_/Yzއ1ܸp]mn}[wyAzk}e]/~O9(jD`#r5z-^G>uؿ[翟s(!*#gxVv;/$o?B$ o.{,?pIT| P+  ,H,Q")$)=,Z)%ChFDR-B xD/[pAPA- *J%I'$PFHJD3 B%"A X B,-p(T(J0H[31XAR ^"ȞQp:B]r-.QQ|y5]@4(v t+0x+g `_X,}]uлb l@id֤S[E&$!OVnI0A)&F/D!Y8I-"CO1!uH4wCxXYQIyiHm޼O֣  (h@ D^QQ rހc:YAP9 H@ BAAEnTYtCQԥ(JitHHmItdLX "ABB( J%KU)!)X7CUF)>dI IgĴڽ?d,V~U||J+ONu&tDtM+4 *%?vk$@WnH9yP0(Wh]#D\PL"!K6E#fH6 53G'圏:Ōu.fvǬg.C5vH%rC}(Xl` ^DI0j D!n-n] vit `wIRE\q"nm)UL\Z-S2Ԁ$#6 `H:P0D@ԿD)+=&N9(.`;5(0*H`H2Xp4\fޔXIB$, PaCF,!D5k.E,ܽsڮXEGKcB7`yM߇|-/%W6@o5[[ BqlQ`$fe-2ca00R̉,f/)q/zx9Fhdyqs[{/gDQғ6v^f/?pȸ.5wZ_gd =[芪Vv~O%ޭU^ HNR@ {Ӫt>_n=llH͇"XhleA|5%{yؔ|#4^'?AR_㖻W_Jej4B<@@QH  HBFUYŌ1'Ai!ypK O'oyJzē "I@ < 0,{b nT+ab ,[`M( @pZJƠ! BB@oX`E,X,Xbŋ,X@(!$oJw=?so2Np?/2p8d1DMtP r{"`wEYu嵖R!Ad6c[,X.5ƴXR$^Y$lY~IzF /.@#CKn> yAzͤ\2㐰Jgǩ!>:3oPHyLwK3-,B,T H@6+D)KHq f-Ԓ`!s(6(BPPZX,DHR ,9ğOr1;-:]2Vkt?X Eň"A,X`b,XC@D((Ɛ7ިdu}Uq*QPpAT,?TfF A*#z o5Ä4exxH)aOrE` I,HK-E-,)bޠP(U) .uE^8ce WFx z p ݆q*(tb닦": lPkK,E&QFqSiqr\/R-` b-ܬ.eo[ȸ.!^[\5ߐĐcBHQ#R-$(bܶD`(U4&l\ ѱB||oӍ\Ĩ ,C:Fth=zH{HzqbǧȍFEky_{NoY)`OyǓ)L"D{AeG"+ N &ps \ź >7W0 JYaZy~i珥uV.=uMtDN 52$S#k)O("O\ʹdҏxw3a?'/b#h /H9L6$뱇hye9%yHn e#0y9 Cf S [r\0l@ak^K\"cA`ɦ4vdnPrU-ͫv}OdPHjwے,*8^Їp L@RXDb!ΐ,9# 2Uy3-kʋ .e`Z0 BRv@@_.[/ &jMQ@Ab(iL$?\AQK+@EB[aCX" >@TAv nSg3FP*-n1#!dHN߼o]4kA vkTmZ0tAo^R@qb;AfĚ-BKc%. 
ѫ) P\$.XE2 Qt$}=pd?'a䌿TGCNj}|F2J*JTbHP!۹tMr B o\Ո"2QL1 ]rl$tq2*)WdhE4=s5Pn41r0DL+_ wƘѶgdͪLvx_Èw~N?|.2%"U8|]~ĉuqܿ-iPz9|+1 WrQ1V M6k5u)TvqQat{?r'MyV `.1ᵣȟ*:˕<:pD-b6B57]puYUÓ0'}5{tZńt4c7kIfOk YЮ֊AɯP'/kXz烴9 1ICƞt'l?7|;bTYO (?:.~mclS0Qpn?Mesz`;1EfP&싞T, 3j ,)NapH=3nw]ޝOz` Z o.I Ԏ0^7lp_ {jX :n *Ђ|װÊ'o?W @(rkS\Ȍ@Q~J"??2\Ocg_+$&TsCLNk/U:n. ]a PhT̢W4C!]$/H!KXo.=:P0TA.x<Rȕ[hʈ_'9t&"D{UV4#o@'%%_ol\1'>`w_%UyWõ7F !.27>&x#a~ ]_eѺOxhi~uQJG+v~ӹ+\4Ag7-sTvi Rh!:GWv9 w"Ljת0Ź6°grA ؅6;6n 42Vԫx!,`.3U-oz_˞@t #A9s]Y>]m>9RyW \4=g}Ӭ=G~,v랁vxFĎI_4]X$B"YTed- ݱl[qk bKO+ӚRơb~o)Nr}؇ll=P̨0ek->_IAz#OI:AMY:hIp)IX$A>dhs,6?MrƅX>WhGKzzpIkrcf@GSmtGS͉p/2*FG}O E}=cdjf:#t\ yv} Pj\8=}k?Rf=]Xj!#""*w|xRt?U> b?m'P>6bi۩@sI&yF<鴬 "u% >QlZ$VS &=LLsh9YQR1gN?\1:9 6hmssZל?+S[>f?ZB3 :nҲ$qvB)a {7te 7sݶT KʧŹVGHH + ),0ow ?w3h}iEUJntbӖetZSƅSD9,tnHcKeL=B k M∀CB|ԃ߯AYӴ C( (T it=g;yjb?Cf\?*{G [u,,RYI}>jg:놾gpo_D8DSѤwqr4gLJ|/M~R:00OAvPH((w9;Wd Fx'zp1@.zU9)قΙ_LХ Lo~r,xoymܐpF}b0Ly&!01 gi^46;BN̘m<4+'2 OO"/JkjNHݩ_eu ='?:tѶ+]6<xnB6g@2 ԖKOwg0 ʔmQ}&1k BgGssTuyWF&!;d!AVZPg8|ǡin~A?H< _%xy1[!ПRGT^G*iSpwֽ:Dq>J$I|oN~z'w/P.}Q-% IrěY;y f=Gx,^ , xi:PrE솫+4r@Gb[ir¸{ㆂ}=]9:$UP$KRN^Z^*W.2:vxAa_N؈Z}t $/yhw nSJE_4D}i8أWTz=pWf(c$iM7_TtPCǷ rEnq%1^F@M0'^)0IཾY֗eyXHePxUq6_@0aUZpaNJ*wIo]{)Ҟ'\aOw|%"bd!=a/)%Aws`t77L'sڧٟaVba89~wGR3 K|H }^3EՆ*NWl:ږv +Y6^@ve]>J-hzD#꟩fkOaΰ R-+:rm_eՃ]dzKP{ N RH(7'ꕸ󧞠6\ D&i#!T`9OùȌF40XQsհ`9͛4**N|b{SZx~+kryM}=gٕ $4 Hj{1ۜ> A|}G@Qy z6IR}zqyz8Z> pcoVSVMqz'WUT~r|yj+e 8@.yjNf_L{OߏGww0 U`Oպt)X܃hޣP94(=pAcq7a=#0>/`,"}$멣 >/Cp?P4$ccs@G(WwL+^-@x X;82HYI׊nu<>\:sBcz=?% 6Wآ9Gծ.LPd,Px(|6E#̓  z(;q:xݟ702DߟcAM"!{+P*_{| Utn]d*W4$8~>b{jFŵTn2$T?tM3kdi ]q2NA*{ &x=G%t4{+ZWt7NxZT'L !Y@wvfL'IcaOOq4"] iC^6By/kE酬_XY9!Q%\Ɓ,JΖ`>jj@yJ(DRxl~6%UlB UNV +7IJKBbFf"'JB9;lrFvTF=c<]qNc-{^_D^`%KD9@Q4 Bʙj=ՂE][/ܬ=J sA_s.}|aH[L<:$o@>4q%-ʶ7~ uÂV-4mzq`X!). (~xn^?}uMن֩әP5Z:ѮAL)lo]dN;I"ʡ;SD\ڟEx!lZ|sB4xw*DЪ7V w Cʄoǝ` toџk:J']zzΛ:F|L4y:`wPwfoXݫvU+y߷h6uP7a#d|o}pXi/FJ[vY4GfDl4tY, :.t "QG];z:ҽ[Gjd.AjElt9J;?9|kcWy ~ҴT'o/SCKTi |GA )/U@X7pͷjYr4wNV`zY6ܙU2;;V|ꢄAݱC7NиH|p '!#ƺiSBO#g]ڑj2kZ.b܃Fֿkf;y%uNAFz:1bg4Ml2.2-h 6U F~cJ lJ28d8{3ku<59]a,Tm?KtPq?Ϧ\שS+uDo hshp5`k;]庞8:,)@G4}cP%XT_* ;BXir? A1 -֛VҎNull'tg0~ӕ };_$*3 B! Wu@]%ꑹ"3s৺3YK?#(wʋ 'vMNo?l`xO4f&-bS}V:ǁ 9媃%ם2CrdSovg#_h Hn^w p<+ob&l羵% ^FA**˿3kޓ&&9 psyl)E s3 `xjo'Hv7Kꔒ0yݷ:BP{r]V3_GR>I~_O9p`T"+N o`zET,k vCFNc=Ga6(Stq-HUH8 5: 8RԼBc!C6>$e0(ݻh$w0z P@K2 nϯHjdF㑁J\;bYBֹ +wr Gv;`F֬d@ex^;4`|Q %C5??4:soG_OB շ&m Մ_c*O̘ f '](KiU)hAAV;0!O}.x .>~JG)žkVUmZi&K n;4YBKP47,ck4:ak$]rXp_ n?}Jh ױQ;kȿc})A kYttI{]J5:t)J( zIbDTA?̧8\/{vi! nXYa+jyeG |>"Wujaa@F?LK60}?!p _?EK!9'~ ;7o&s4YVo wy1; }fZKѯD⯖u,gL,đL_`M/;-r\5Ydv:"glf-j{N+ /&p󏓅g z7h7>K 5^SZ {8]S)hQX&Nk'E%7nhJ.tӋ1 ]C3݇>jr_v;WGO] ߲7<_{iU?qJ҉YZқW o DbEo>IO'~Yќr,D|P@!dvw]oV 5q1Vi!@5+">)>g֐<*CI[Ȯ fKY)'٥^ ={C2^?2۷O(Od:t9,)[>Yj,jHYm]IX1)ocI;ݰ£nW )s7dIM1d>f'9J.iEXE&ֽå#_~#eؗߡ}s0lTT%h{ T]w ,VV\jg].wkaãLJ+: }iB 9~yEP h()p1z*;)Pq93< u'KF}{9i {%m4,O+_FVWdmނֺ GswD4^)M$7Ph,[z;ZיKkYL ߘ苌cff:$pP!i>-4Iʨ_EΥ6$h?ΘD޲fҚn x@WO=) #NzĪAf—wGZtbYlM֚Y p18 Yy? 
aRc66x36]< >kLtdGc)s@)v*# w#75< =G3 W)=\xkcnPXs/w^ Dp^CةZwfw!Tq]4ݾ&&v o#%ڝ||IjG\Q$h)@[intQ^T'&Q[v O ,/¤2>(]֣3s5ҤLJz{V4h%ޝ)Qͼ>$ݎV/d!94 @j^̫XՉ[;$$abh8y$IX϶oz@*!cL1q Jv3M $瘨qyב/*RI43 B}TVzP=mg&7'fYbmo7(,)0;@0 Wv >Z\ MVo/#l3uta=3|U߹M៑ ;<5f;#=# l3{n/-/cӕGxc.)%:JFc7 BB2]}B*09e߶u-SRnjf{7;Ot:?1^KC9z0{YG)髧K \c4N<'KpUe6Ė Q/Qc0Z0]C| P<{4c ol8i*.Me;ol:w:rG sj$H#@&.HҚbmJH0c.xYp9ݒ݌=H:lH}Q> 쐖>d/B<f59~; 3ȸTF!;# ʱ_}7!I$'$1~85(yZ0[ckT~/Q(;GipX` u<k}g_^`YӦ!g=@XSjiBՐt?^ j"m\$"cLuFp<+zS4i%3?ʌ$xjԜהDHOiwiOP{"arm>hgd7OC< #dA n vk |ը) H S[obA7HGNUh;LNu{ٽ(p@8|u?euRd3RoC ߺ rwfb+3 Ljz^}kܯhd?sNIZ겈NacrSvI.Q~ݶD>m}dv'l9wI-Hٯ1wbY 4D6Q;Y9i$ U߅[L̜MnZfffb uyBW;{e#TH #PZ!#i0m@ M5Y,okÆn{7W0K.ԎX(!mcx<Í ־c,Pc]y[Dׇ)6F"*%:IJfP J$ovF@gؤ>cѶKI\q7YS&5Bo !:.к@=_Un*@2F/vcJ?c/`UGAC-FfXyK cuw/4am$8;/.ɍyٟoAnvpn-~spF*C^N\D[w\JkW%{q?U6].m޲[O~TGjtȕ BQ$ć+AnhrJ SGh_4B" N7B.e\USa-wY PlKZՄ$`!е>BYLs+*؈%Q?K÷=_h&P˔P߇BR2l+{6 {4~o IA>3* ĹhxfX3 c`~AZ.v}qpd2VI FIPoRO^0{akvоGa0+r16b4tSR^>һs͘fWn 4{TBTHv߀4 .~tQ Z5@,.h"( rB1uiۄ3MIi -R .,&A?Q r(ĒbYa#ܑ]Uҭn1^ vZ/W/lolFKQ&ŝB; y)·~FU|2@m03j~0ԳʵdQm6zFq]5Qxs|xQw7) [XX@鵝jE3 E"i@n[00G֍RESK4$)H spzr]~4~gWp5WȩaHd\lolwYbP؂Gtv?{JԡCӦ/Bɏfm0%:>5]ÿ-{)y Sn79[qAe6W%I.Ѧ6j n1wlsD2\ih ;YRrqY2/=%_&ey,;{L@u- L/BuUB7,kq]dk})ZawA%[,_vB@4zx*9qm!) HzEQ HQiE_-2= 4Q#RWea%wk;4C(_rfMs1P^558lYs^»|Z_np{ \4Ѽ!ZlJ*֭ã\ ~@$H -;QӁe_#ש"4q>*vi[`v -_( 1lF`#Aʅ!KLhu 7%Ew5Ƕ_,hge6 \g.| fEwxv[*fr9qi!YuӡbD<5Ɵz(+:S2z3<йmܒƅ|Ξ0Ġ00gUy(̹?j0h0v|TVVo׷ +C'6~gYktByeBb% vD({'q :jvyV_1S9kwy<ьۃ6 ݐYpSjLtK/3JN6T׋N]Lp͉7'H_*u`PAI\2_}469-gÓ歁~d+j*7ؗg3M‡qb+# b`Co(t~2AvOB]BGmr_ę J~f:$<_]oZYbʸt_uj)FRGO^ @`rvHGWqDbQlfouEi/&g3O"JY2P 7vXfg!*Q>9U(˅ y@Q[L>UmMD?[Rr"X ۥwG>WY+)@q4%3LXh"ɴO,D?j1a[aRi永IG}%ݥP>]S`xY$)@њwsk=>c'F f{j]i͐=RZ*lB >.mn,K"{ ܤО&7RL^Roc䆿y WB+Aڎ'u6Jw5[;Jbn30Q#b~ysTC4*q&Ȑg9rR0uxX1/ONbWk D־V/G'!l]Lm) tQkk"sL͘F"`J$̷û܎ {u j1U' /㥾d!&wpx}~;`̽΀*=@Xϝ}tQB{=T?sQ&.0sSP,uma"?THڸ]3:shFf4 !eٞ9Pd<Zz(fVmRQ26WP oUNe1 O;ev`Wi;6 U7 4較HU&дr׿WqfN!+*%%ujl} >ԇ\snbmf.K4{ ,H/eS@а7]F KBUd2z# n=s /=>.c=*CݲH_$%="j]>"bQT{U|0žצÇOurMumJXLGdQA>&E ܟ 0V2\LMa Dsσ㺹qJbH4 \pL䁘diM`BhuKoR(I4fJ:t 2hkA6~"5FIo_q69nHU!vֱ9_eiyW /P  /!>4Ҽ| D(_t]m&V&_̂G]Ao鿠| 1"-^m3 M2ߠQTc\v``_|w&_ "/ĶGpG?iVQ@]p_y\OW/Λ[أB I 9G;{X֜] @' of:}2ưR/L.J9m_+цTgR~6qtpu2Ќ@i5݅me"@urm' go/O˲`vqu 7I@܇hx6ϹbB#B(U)AVy{ЪT[Mo]\|{{\9g X.ɮѴ_26qYf%E @3Th ?wiG_ |]tBsZM*w4:C&|津QLVT\x%b3rYG׽|Ykh!nrJu%i'7n4YiͶ=3!l~CEL&-B?qo8lM`wq;kT #NH '.7#Jo—肬bo5ƫt"B^Hez%,06 XJ7} 0RJ0JD`MHhM]b ɇsGNps<3pm} Hx%MsmGD,*1Ue Ya3/#{=-hlXN=^XnO1ѱhB 6!KȝIX*&(DO;~WͯĦ^Yx21$IG/SP*wnj%k$.Z5vYjOܥDOF 4lr#* m  7@" 8=DI*l}ﶿΰVVa!JM}qؘkޢh3!T0+c27BnwhCuv0=]+hv0:Xr,sWjwpl"/IosݹG!*}OA020ZϭhyJR[ <%Ip*Ϻ+*eddU<s'OWs@$RUnR BZ ec%)GU:~f?=ͳM{:`=_>loI6o lx\lƚ>,G!5}xWQwւv#Oε_f>GvfVB]@ t` ` ]FE ˿=v-<duw#4mA<=d8Lb7]bi9aitt _1A3qV3k(;.hLDSKPO?*+d١QkE_4%Q b`>.ϴ:9gvZR+}l<E"87V^;BHͮy(GfJz#L!L 3"g}պGgfQ8׶=mC틐V~}@P\&\=ǽUok_]f[7)4p:_Pjac 5ˉgV,ߺ ʣ]6w1þHɯEKI*=yd:ܱ_7"nӄe&ThmUI]mFBب`#Y=9XLl!=~Rʝ^'jyT]MO_ }7Y;yn42f KƟv>(q0@ !ũBA ޥ}JP;;tc+cؽ%v]%A,d xA{;aYH{xⴵ)qiF :Ɂ-| ŜR2~xn*gޖ @.D%!tfEJcu*\?0#! 
5LX͇hM-mM} ] n^{DHmzut}C3eG]Y{Z8x *'> :^ ?9 %; VD֙9nUDW;bnWap k_^٪ىF&Bvqu8[eAw|v!^D!u$a>vrtR9:xn n]QE3e\4S:*oL='l;Q~k֤ʧ883Ds<;x*M0xo:VVhwNw Tϗ=ת_$|iVo-`'}2aNyQ䅛2 kR0A2A[Su- -v7:~5 VPtVJ&CxG$w=KEƞbKD^9 L=8cM`V1[' GJ^"Y.󤯎c4nlMLi gu7U;$g%L4-mT*)9$[1ƴBke~ IPAA"33:$p:}/뻭x=\]"j G=RH@A!XHzf][UY[cPw #nb,)%"QT ;|n/Wo5i 7K5l =VmYW^%aI?"G48@=&E}T8:=$C$](J<8s{.1+ <ÅV*g]0fLp\ t;H~"-V+!21}N Ez邨GOS{fhzYaٺF ȬsM4س>|_F!$T$>W.*g4ӠM>c\A.;rJƝO#_]&b>!3|R|^rE-JcY4afOJgsCA7;ߟeB(S 9=8{ # -Iu=NxEB8{ 2+Q`BEޝ490ƙѐ5״>`R?mc9J|0Y ŕJ4t̕uFƳ* PA~7h &ThVk=}AXsoؐ;*y+}Hr_ҚJɛ:Ė 3pxWD;6iGWa֧ˁ%~a.:$!cnƧS7,>t.`yP }k"fZb`Mo"7*r3Št<>L-BЌx>GR,@`_ԕ]DnrM57̒YIog_q\X%D/:-c:YIQϡwk9ǃދ,b'MkG_:L -gBAė =|mf|x;w>}ڗ_rY,)mUS*Cẖ<(+?T5Euې*|nR zZiTZ(`X#O`@TDk]q1Pð/)wC!C˓*rm )v 75Y)f?6z]ku?|@q7@6p &H+v@ a} ʆW'Po3MQ<*C->.d௰92xZsor gw im7w\-2dMqAWWesP6JWm[|&ѝ`p'o<?u5#@eOZiB;Kq]vӹc`5TFVR5#1g݀H7B#wƤbEt?7w?!kR|ӗ+p,qJ\*A:yLd+yԋ'<$.:b8QEcg]: e$1&K=D&0Tx>f衔ߜo V'W zHg~zE׷+o Na [sv +y_{x, !:@"Iʑ=3oI>7?odSAW  T"R@Niks5>&V]H ֹՒ􋊷brHl[HU"zY |`2]-ka:ez!NKg?| )H5ćD_nzCHk_ޱ'zU=׏6U(/ub.icoFKl?.MR98VaC/sirM_צBk`LEv_*O2\XCo/w+p=,.tLͻ̏O h}YM3wcgw*1nX6"&cmH,=5JTY"k#z:{|,=o~pݗU+IZOg[o𰩌$ `_I;2u?rs7/: xDBYxda1&M/+lL)jQ3 dSV 5"ZIزgsm< jOŶ"T_:W5{,?3:o!8Zg_0!+\?q b D*]1~#ʢyYܧޗζBXEAp%m {"NEg9C%-;laf9R}\5:H3om>qY"×~Iu?ykDŽ =)-[A1hD@͒m<13!݀ݲwhW(t5;ooE q؏+$BG؍1S/k-e%O>b*~WTe:z͈K]&"-xzF[O~,zCY}nF jB H]3'S"*t tBjxrTi;y[=@??_&9 {Pax p"pϦ].9h}׈<8:[:%z0}cDZ$ P'SqYLIq.1fl0pͷү${S2V?$¾}ځ嶖ZmpvUth|4rFXGތL[a-VtPU;ެD^{ܭ6q4`ʯ ~4Z2y"հ ڗ0QMѬk?Ȱt$&X{Ci9" __)&ΰcجjvki q]멖O#‚>3~m\5:q}. gpgVFNcp\?&+k.j:< NaCJmW/Bf<* kIӡ}(ҩoE[BPwۏ:Fn8> L(mHБ1%d)e7xe7i牸^#L/^n/G b4+0& ke5Ь7G*@fBYއUW;x5.~j+ if@!ӮCM$@DOGTt~0'GfSGz ^LЃLw\ R~YȜP򨔌A t]jY:纞{W'y컮s!*{RzäFCDZpg2ꮛO(<t{vbኼESiqo`]{y_)'&V&_rF}J7R&n=@Gmp~h$y|S:hϮp8kt9R*KredjVZ9aũEoHĊLO^Zjn \GA]2uhz_e^便81B"nr3;,J &pg:e "pFS3'ˢWd+yjOf'})OQ2Uq)ԺֱWԳ/+Eyhshb_3t c52:`}N:Tػ]`w"\`!˦wݸTއ~[ @`!)5?P⎏/2?'w8e!o`/Vm6 $E߆Xtfb/~йߜ8 h:͉qDD~c]Jy4?aFe>S8 ffuG5ca"JT^Zm[٪:AX?l6ȶ'@)TzoAp%Ϊ[Ň]gzgb4d =^v%Z ,| -Ǘ'ݫy;HG`~.+U'9=7 Q\2^ף{ĿIvk==wMj{4xAҾǙu} /lȑ,ҤAPzpGv ]9h-D O~JVaȸ>xfr~[Sô+$c]2{F礬$(wI _Ǫ3,b<:aE>mQ= I3Gi>*hE3 G5tXt |\?jz۽]Z?(7쎂eÖ<ߚȸ 6l7f}7_?:Y%x)\Llq)]WG@.=W[@~(G7wmioǒ x]W+QMmf- !pq&yYǫ#m]}, UNǏt4H1Iz5>.ԝțF ;)^?3:4E5W)U&*# <}5*K:cҴ[UsT:K\ *5l-rK}e0i>nA`vj?_Wy7t0v%I8P81'MxnI8_cz 6g=0kBT5E,O'?вwi*|QJ$ݠJu$6tLcӊ[鄠7]>/=[Tu\-~+NQ'ˬ.2dƨWb^̾5pEAtA ƌϻzOʽ(6faiTZ~CsI, lwX^}!6NպDRؾ. %= mA05KOA-`񣦵c{<(Y*w:|QEX~ S&$"] eȇ*9ʁ, J7%|`4d'4L~vA_9{^C;rJ LʻٜwCMU?~NJ6mk0HB-|S#O23x@Rb+tEr.z!WǞO+-v{Í_+*i$ㆀ>){/Q\8вu $7zQk5/Ⱥ{;LԮڞ9cmA^H@ y. 
{N{ҝz*ABf\%:9Qr" xk0rMVCK@/b<fMĸ];#z{sVϜl_8nVhOr|ؕܮNX6LU ˺>] lfomADa2[gM{L}=c-2Ɂ9' M]* 6Mfցn?Az\¡X HV/Q7=؟PD@ė*ʓ&-S9߁\f/<ЩdP{݀< /wxHQDaR1S[}awǩ-.&CF_ry2t̜Iբ I!sͮ[UTaQBwl]fZقnBh\}~X@S9FI byNKuxTCj,7(Ћ+T_61g0>%,a΄it>mogȓw|+)f^sIѡ2IÈ<=)<$eCT?'7kѶ%9ƨGGn=J|Cc$|1,;` sE^Bk EpC(\,&X.1 khcp`t NYڒ)=$})>G_ĩ˲5,2RMO$b V{s|PBUVPsOxu6"G&X./* 8ALz^sQK5IBC4]1?^oؠӱH+S%>=D :+VR[V_\E1 *;=*ړ|ddE*2Hs ?%cšٸPRҁGHB[ZÍ8b ߰ !R}[ẗX}aJlwt:К{ДL{Lu7$>2՚&Gpe}鎚{@L[էPp}؃pJ8Q+ w.Rc4w>@ ^jjV4fIzס75 };e%+禁*6=OXYOZHBT fyf+cr"ht٨AJLnj=r'j@pA^}>A@R(H^\ CH>IrpQشNUEhNG?'} BRv:l4פ3(W'6UqfwJ`ßpZPL|鎇g9E}} s4s*QSZm\..[;d1s$-kq[0SxJl<ܵ[-q?uk6.{ c?țetbd_>Nb!uxo hr7X:A23I0q@k;qT ~@Sȍ`ÖI}x&JNAݜCnaR_I"K ~c+  lAJ FX/bD."#8zM>|erj/֩C>/`Qt+Ad|~8Jb5QaޱW#>T'Y;V7ed /n#L P6BXc5mrL5DvjDfſt&EECp$,wPkVU?[j^7[䲇P3*GV寺?Ij}8I ^GB]y|n>Qawvz]Ug+!bsYK}Ƃ{fZHZ$Ix끱mmB[TdU@"ݒƦKbx4ݛ쟾 <>bU}ȅI^2 Ll x-Kpս9xf1jTYUwHל)JQ@]ІqwŏTF箰6}ۗDqE.dy dR#>Gl17dђ?Hso,yz>8T_4^Y 51]vc̦Bd:ȐJG^g1@.fOhK|_az?ٴl^@Sk ܝgr.'{h]cc{I n柮fo*KҼ@?v2ZNO_hW%&"k4gp>HWVZZxL.'H8rZ')g}Dr܀ԋ9)8A2zzԷ?Tdf7 ȯHW맾]jMo9GC)KCȧ:ocCO9rxS-r[Ԟɞ7HCH գSǬFxwϖ_6aX+k哩a M ջ1@+h&Ͻ ?QЁ/,(|zʱR,tn3^1kn.^ُ/diwLJCV[oNF!o"p|asȿ2""LlTU(MZ#1r;qdW$|Ϛjw4]V5~jzgE80[Dߵ׀,z"ByV3->}ɰg=UF32{cD*+aܤ+c8uYts,w`N [hCë %fH/&lW$k@+hڄ@5!hL){qa݉S|j~L|A0D'%zqǛl0Z#)>'X-5GaEH"`;m05N ֑{:L)qd UF⓰#N%B`о6 Qυ:!S}]t<*JASϮYT46 UG(6ͻ&fj2vLS4 Ν"ɁU,qiz2RD8$7KG$ab<;8I; ORe)oCs30Ulvlp }jj=])GV{ُe+[6ƂFwByY[("$(/< cJN(}1b fH~>P#՚;~ʖ&h r,Smiko+P6;탬t:GˢQY6rZP)za;bIPHю-mpk, LC?‚ ڰA^g9,y wAh~GTJT/L7EEw^;@Eh#ɟ5U"eHĐE] zUҏ?+hJuX mB0cW-"t~>4GV8$GMXK@` =}k-">$gVۨh- \o7OC7߮*u@xuҁJ|7Y;Gk<-?2;CAܑwbpyETWpnQ"-yܳKcf^c+$'q}}M*.cog1"q7sM'8AvXPts 6MB01T7q'2em^ySA[Swʵ6oaF{7z1X !;뼄G=c4dlL^O`m2"3XQ&Z= %5)b`YK@f mLag350㕤cdCxm8 gkɔMzKT ]Iӆ(.N]_z,ZuN/hY^@%{vvpu)ZH:unxzSO8(O\q4Љ2@)YNQ^tu]'vc۝-҉@J8=\"Ueg~) CP\κp%tUu[Dp :>zg5yUkl%>åẙq2'D`rUdxPExxt ma ;(AmL@}#Jy@`*Q"㇐g8X/tů[S3'DLA>vf B/ QfF`S$!T?i߆@k񞝂\@KA!l"ҭ5uʹV],FI-Ң-pZ ئH&ڄ9&\ Du덁h @֐Vߐn"itlw7NA Hxi Lw<#+Nxjzx:\ 4΅YeGLM;5$l/WMӤ- KT<_0 Sl)ikΤ9zy/>UgUGбq`ksD76V򈦯oF<&Hӿ4'A_2AP:"{'֩9hJ:8 {~ٮסlO[Q&ڹгu;vE/]iGv̽nt{O% lrUxy1wo>}poQޣР*[؍p .l6pnLlSIx F|UqE0~!!}+ME1x\M*D UpF=V>I4t;sU*V?GSPe *O%fjA˘@jU{XV@gjT25L X}+ӚM>LE?DL4Q!&F+hkB%U~DcKdZ YdkR Y]xt =?fi<}G%qYX6/t#^٨tsBQDyyI)`@AFi>ʁ0YgJ\/ke#x:m&N9:vnˋ^3!zbmtڢ{պ}\Ԥ-jZ}QDp qX(!ղ2_+ Og}^q aHC-MpA+ߝCȦOK9~z,bK'/ LCv4K[9-ך!ӛ/Mo/R $ *\9.ki4HCRY8*Z\.:?&FriA/e;sӃOۻCA 6~\>&r(ƍ_u(r swL7+T7p׮%6ZװC4 +4^[H=]E ?RжpKc`8 /rn"rÝO4Âk<#@ǣp]v)c2m`BǧoO-Ig b V+}Prkx)܃j04j||ԚMZ!͏u/ ZWC"PΨP2RLLQz(``nk{pepi[6~8%WY&Hqwì }u@T-DfHоXdFd̂=VMzE$!NPpp wf-r#a1) k(49gh ,x0;[7 ̪ \EVy(ˤ/+3fh,;Y~xXIr~-Ƌ{}י5v 6V3 ߝu9cD>,k@,gP{U:OˡiԄyvTq~6mτoOB8ێm9|JGI$(,3o(^[quAccK"]u1e[p67]>e*:   ʹ(sXC:fG$UX֣_B^~q%MZ:r[Q^B1>6 sLk4@9^]GV3M~ }zBtÈ45޳, XCš|v[M?mmzwZ~@XP' bta[P,ͱ:=^xTkq֝E㵧._\%ügy-G:-{B sRXc a7/TMHH(%\頽MlGRapWi8b0uȺՕٝj> }ab;iVAĢt!m`NPy4mAFECHjC7b%'*hoۈ$W4WZb$*>'n4%d{0Iu(>$"*~ߞv.#q􃔝̈KF)EeH?(ppNQc#66MDz6Bf<,yߜND cQM}S`MX]\mzȺY#Tnfhה_Y#ҫ8T9=Sw0 Onyc' k qZA#+۬يo^uҌz: 3E⫏:׆f,քL]Rit^(65s_%%ٹVqv_y+B0EO?V!fO^N=:r%{NهH+ \7 !7VW)mۘ͠g! 
`E3 vēYP9vPUmLI0c;M$ FGyǸÐ9cU#ND0 & m!:mxLSWyd-2] ?׳a`a)Yn֠@&r0(@cr lD 8Hoh2WA rW (Z.b2h4 !Pe- S9cK 0:\$ǀ/#4 64$ _#~\ž0J I`&VhSy 3 Lߟ@1!SGϐcf)Tɉ3 2pD~_==?Kb!eaϾ7IxQ`IΞdC)^[5!^ o^ـiХuk{V"2 h`Q\4[6ᡰf0D$ƞ"T<Al&TTz6 '50[hw ~`6Bϗ;qwR^ :{7A辊nDu9y 3Q'\L-\=p]#iCDvg{]b:dC*@TG孮0z+I؟6LFH9GÈ*ma!X"3ӈyEbV^qBҷ/l;R衁hJHw+J\nu{00Ru2 WV_ڙbN/tQK)AÓJI,fnV#5U7gwp,3˼nԶOsV;e 5"fl H,,0\A񅆰8A`XTx44$d.ߐ'zJcg3(`d(:bf(^1I2Tb 7C\2 ФAy2$30i[_>bhzo{O~oR2AXZDhYH_Υ.u$X&;>fMm5@mਞ]}E+0炡uqP0o~3eBXv?O[;Y'8a-|~/Δw=h;VkL-h.y7v'J X.| Q&@u[OCMg,6jjRmM6ȊCZ>fBӞ忺VR40q]{yaSKȯctԶ஼ܤdz1E4npd2! Z lO|+:aKN1-T&EFVD44A)>ۙ={QS -½N!_Z!lȀ* d(PIӚ^6xz*VLeV.*C  H+C)HX{Fb ]QLYNS\0WsXoYEq2ChғPP/x~^\gnxAUa3!?>THU"a`( P7A"5/CtRUAҪ"@$946gzL)A"y[<˝ANgJ#rwdB"?Q߇ی  pD W'cm^ФTUHB C~P?BOlt STZ'ǚx?HrLk#+i$Ȃ,?,W$3OAENs2,RքkXÞv_nւ*x-8 HR+#wϮ+ ]> `y >Esԉ}ɍ _&h acgJ󛄰mH{2zU=: 0p&i0AsbH%p/1>-X"dNEXR!/Y`3L IFo4mYHc _Pr8܅m4)\B;Bt۝ j;zNIpk&\& W6Gjx"LާnCntjl 3yZ@|ĞUͬ, :NWD@@!zUPLlո_4 b`FfʭmRMvO8MzRJ2BxcI꾇*, ګs~P8ߋÂ&Ƕ#LuwSبf gsF6,S!p@l톖2X^rNɆk [  VPn`6+Gx =vsz/F[}c8W s³WEA=dU6& ϏT I&aVJc-}Wqke KAw eL;r6B#P_5IC" kG:MKqiZqoޑ` ~wh+}?*(.(R>OtS`$T83|P䚛DDq`ﯨ$: ) BՒn v!/*(Xa1]@40 `ONMȞK:Co'u:MXm@~%ɇ^P9%]+YV0] kyG2dc בoTVR  Lk r$wF l͹g&Wz2!}[|-|m_¥3 VxV+ Ba/:<-nūҸvaQK՗#4y3wۭ/G"ՠ+I )}diDC'BD݇$(q?T&뛦c5U)g]ڤqAҀAW]3&ugyS;"Pj'kES?LADB({%}H.CW67d%80A**ﹱE+pOFZzBL@/1z"XyKxIC9E+.dr@ٔx_,r2ih0d \(A rA Ҳ7{31|_9q6Xw~0֗"|>=*~@f3V*϶{m\MuBBpw}{"xIn>itN #_}s``amud?mSo;?iq3~[&P!`UM+iNиM܁ T`!V(!? ) N; `\ėBVa JHLلX%c!0  Ь IB/7 EA3$vsIgn Hͳ^Oۇ]Dw tⰉAů r@x=c@GvP-؜Q 袨"ꗄÛ{8$I&A$!1aJZ[u@Dj~b;T:/M|Jlpׯ8"p{-Q4 -v J`1H&ݕΑ'06eІ3xA'z#xRvq`Q LdS P™`{hzϷ{zxVңl*SPh(SaޠY„-kX4QEݧ@!r ( KBu3$z )ap+ I4J%o#6n항g(r[SAThjzp*YPCe0K}Bhf+E@4s^_ ov bEK?ؠ,BD"@'OeҷT2 F& H`"C60"pD$ADr,ߑT^S,~B:o8Q* Y38*Yb@'M x˜3m% Fwg%CH~ǴC!@xHBQ{xWFl4#$ې _]؟+ ԻW^B Zǯ90$]]mrMI,"e7qLpR@0 6$Cc6>Q3=>&y.{BN- [ЂLG4PO .D oMI i #ջ-d0 @ c@"5pj??}\%BPS귋nNۉ*6~ >V;H R(2äϒ6Zo"{#=Qd\W0v[òށ@D&6~C/x_ɱt37&5xWBd=D[ǔuΘ  >|KBIh0>hn'֓F^Ve`;Z=fx =0hk]}jJ@X uk|]y& oL"xC<; R=!EТ}&q ">KZd!/JCZBp7m 'xJ! Q$p  zGok:{LbX,{WR> `H3pni $: .re3MNzA#G'(IaGְ3?nǡL&@B^*8md˶vXzv;oK} TaD%>FDF S/#?{ٻXk!1{sumֺzs,/w} š/V%bi]iC~^һi*٨k!"T_ќUXsm⠚ @vVA5u0tWTh[37B7h>z\}}߉N7T5br=[u (B"{!fS 'ٙExԻĶ,}pA3͐1N"Fp"_`cu[bǿHw7E"%ƙ㟰A{yMx:Lwx&ɶt;&E}nMf`s̀ !ȕ:M\D[m_l f1h @1ۛs}nY>HY{ǯ3 ]S/xZ͇qƖOɲWɕy98tzV.`s @B =%1fq@d0Z%ryc&U! (zD@bamfJ$qokU>8~)S)q.א]4UlE(@0]D\VbGߤKAu0Աp-zMcv:_%aIUu l;\b;Aobb|[81(j5 Fe H i @C=UzbN%[}tR@i`u$$@z08HDDa;tʀ%1L$B }>iPs#cp3չIpBǀAĘ 7_̍ɀ?FXSEz l뎐$(܅' 9]Qv%x&z߅ mN^D#"S.]bnh;[ ~(Ӹ8&N@(K j>EvrݶĴ+c0o2tWNo&zX7#Ǵ< [5s:Zr[k*p+> ށ :P\d@qR hw>.8+PX(DxϨ~\⾕Ck0Q;1&p}n:dq`?6W ]ˆqM5Y@14JalN_T0B!@Z YYGȆdc9:5. 4wKTA cc E:QS!WOKf4ИR :?||rg}6HD:|nŵb x?Vӻ | ' U*Y-y"ՅlٽnIЗ p:5cYP_?}.*p) óEt 8H Z(&;TN ܡ(W}-MnzUZƇm`^r`L Y O/229{dRIo'1%x[_{ Ni3*gA|֓1 +Bc"$HR#?Р[h*-' ;qiʤڵ?w[ ^vԜ! *߁hW輶iT^ȽŭuH}76)Ծ*x:E:S1Gud8_61E`=W{[ (<󵁂 4An&rCX w7Oۃ%@"Gze#Jp9Q ! ]0xHD$yѩҢ>VX"q9@AFң4A0s;ae<$>ҲRøXH"4HH>HT y:IN@Ӎ+4] $1TAU ) XvpPg2"k O0"i's2[%3rd.;F.4|01f }̵ Rj=[yqviԻ59ƺbyE,DU ^)$'ah-dU9s}LϕVߠ= >`װ<&d$3S]׌`C@!]0C`z bi:1g߹O=$+APDFYA4`%zO9f\`~KK %£vrL :!<8@l& 9[ySD,l!pN֟Ex#>~OtVJqG+Ί-.5:'(gRZ?6~B"vDήxDN&gū'_.Oa4gq ]2G+9\?mϒޑoἱgYpx!($!2@( & 3\]"J) SM6M6̟c`5Rb{̗Lp7-PZ[1a&i yq}J>ԞT^,9L?|d-,BcO"/'rxUURI߆Zf rA@ʏPH)r`aĈpR\m6  ` *G!ˢUN|G] F]fH=Da=!oSVgwQU]y•~"ͯ|ƝhFIuQM?7 4.B٤ o?/ķWùO0wJT-V W>/w#sO<#Jq! 8KuV˵@q~0OawMTi>ْng9^MS)Ķi|8Nq<9 =~e%F1a ta ~wʟR2?'Gdd3z E5MST"6v%ZqД !BP6}m=Sq8c-Woݯtk.?`H$`` `b!$ 7T'ḦZL3! z3ThX1$mp^8RVhЉm-. 
&Oȉ&J1͗qoWF6): >N}6i>ve(8H)Ye$&%T( D%:i6k-R('Yp5m0%_y p{VUm zcHJ T5Q2QT N;`1 N @L4*ZC(G@TQ}sH+@Owjs_ #(hAʏ[h z'D" ESuT0)rF( fCe;H 䜪AvZ_-F/S %!<hZ!Zmlԣa)T$GUA_63&"2 _ ?˱ÏdZ|;!W!A ~N=DʶX/R\QBaRIV89ufj J" "bڞr!`tw[%@fK<"dR e$b3͘" }:ZdW8"4Tp{Y"kCp\^p{GJ ֏6vPvj$bHHrviCͷ5T\5C(kKA&1 >Wbm"!‽Ge*"5`Nr筂1 0(.*a-S,K7sNweKc6ʚ!\QxE_TSt[%4O)۳{ˆr%]lBMoSOK=?9|aG`@p}@&ؠ>bŮ7-MuൕP91HpAI3=[&UL90I\L0aPaEvEMq4@yDȀPM3 ץsn'3tR,+/12_ΰ,2bf+H&믙uZQ7ʊ8J4PRK$Cn3M}&Kd(h"2E(dkm4\;h9XDK$jj'E|0$M=5]r>;=C2̢e5JX XXg}>/i>n/bkEyxdD;y<ߓwSbyyt{Ѐ11P(2*X%QX@ @$0\ `q;c$ѹaİF!T\K$qc!٦6h`h?]ƃ?jjƲ[y{"ŏRcloGyWURsAdnǒDpޟP00b{QV҃Z1 ip[7Mn Y3\hr+(dc5~C`v<:Iz9}3p8{Ľ@}- -}Ӛd/~wb \:b 轗f.ߓRII1MRPVF$ H! ZJ *A Y(('K1r1бH ?\cC%m˻ ѣL$ F^oyAB}ρ}BI!٦$#iH7 3[GY x':6s*mƐ'Ta`i(>KėO{~tna,RL*w"1LIu52uL"""""03m[˓I\)!oT@Z 0^N#s{j@Qu9!|PnW9MO篶ͬ1M2W7\;[PCQ}N` wǛ+T;4]_QRK$s)EŀI({92Ldw'8'z UAUIQ Ap SZ<|i∨Axe4kĀ)0CAC DiT3 Ʀ6S e0 k0  ڡ}?S@ jxEUlU{ơjJ';p,3t5;V:&pjđuYdN\ *2,kDLpEgdubN@ዉ!;rմhQPw2ѬI iZzl?猎z9|Zerg+:bt,gGF2@$B P(Q%A0ITeDՈUMqBQPTX䋁,ɉrHb ,$$6BL@E2` /*MhRH? W Rdm*hvIkA{nCZ $CzsEG gk8gAӸ0jc^rE|T恏@_t * `K!"|w`t-A\C_e}{)WX F[E@&oC@ p5UfEh9OnYYpIm1ca0'#HM1Fz|k뵑v;/afwgAJgm%["İcow1~a@(1C4HA4!HQ` 'l4D[m\|8>4u !P.M a%P, i `0'@Ʊ.o9aXXk#}OFTz-IQ(w"vdBOI:X?#PCxD q&QDL ݫ@}yrD!9}YF 辖@8TN\*-В2͕]q*>wnopʎ`/Y\ڀFo G>! ԱЖ+Ϲ|~q !cC(ukNatz5jQ*nm[EL'LQɉG/'4܄ܫ ~CT ? 5 "l$?afg6IP Dc 'CNDuJmXدZ@J>G4AHװ+ R9ɧjN㕈*%8`5U˲` q t( @%ə]2ߩD313?{{\[f")Lclfd2ԟ05b`p@ *.[ @L^` ?lʥUP~^5$eDAnDd-s6:78fyDVxV '^03 YZLc,Kx8"60PpaF=lZ"2򽡘X[-]5'NM5CpA"Їp#.MJaEq~CxBQϟz胝G8g6ϜjN;sжl^@ޭD[L=N6aɓ8ʗb[ 58W -˳g@=Ю%U+U}<( C\~r~7v\ZC>>2/.>vт1P$ Ԑ(L.wVa9 qh~Ro +yŎ]5'WٶlV8Hsf0zq2v/[p%k# pn#INK⍌G1*mX9 cX{ĺ,&myf ^[hObNo=oq 0 Bg@H!  kw+ł{W1}i,؈?ð9c:P< QGO?dzHjYN! xH^?`.`sP'}A p6 r@|H%#4|OG"3_՛݀/ah%ZU8I%?ƌ A<  adZ#+镟+VLL6LJ9KǞ/z.߁-FǨD7$ JJ^| 8|ǝ7K灭ȟÏ2j@k Pd&I:O><~dk$"xufOz?2:g8R]{OG(ɍ}|]r(|V~p3 1t$\DU;PТ)JhW ɕ:1F\rq7 }Eu%;ҁUQnOW3)j殱,3뢁<ƣjT" oysI) $MQOK5%2bH~kҟ#XC$-bPҚdlq^SCHx%H$ U$.?[|;(WO]b8!-kHH=0 ˻ Q˚  VۼHX+ecWnǾlsKTY{{N xK`XfZ Yl_Ef̭8]gڗAnqϭ{um/)_Je.8>wuky\3ϻ讟 :DL @^2wڎqL^fi {WBM1?߷o-ym/`Ta7`!wtet$!ãR;Ui.~ۓ ("Y|Z ,5NT|' OJq#+ڹI8ym R?(TfXoRW{i8 $,` O*p*y{=14D1E2S!b)}EsZ*\/fĒwbٍ 9 L5B9s t%Jwuրev<%)hmojPgso'T 5{~\˃BR}5o#x8N4dD_ZOP/{N)S|%5lT-@/ШW]Ol9i}MѮ=/tLOX^Ot\[ 5Y ppKn]*0Mile*4}rlX?OyjPWe)*^W>2h#T-rd 9%Nt'm1/iEU*g@wD~,:oP!@іŘ F|eD\Cj̫'@cl+amvBK'ZL"0gHʟc~Ȉ#%Ҝ6{,&r{ުt׀ ,V}\]w[;͐F\hhf:{R{^Lez6 @ v(ˎ`=rCur@MAA+ԅfDuBT 绮ۂkQ!kYʨ|xZ9"Ac1R*HEw =|S^׽="#wFlUI&% ZdIQraxz@@"UpjR:/NFVy+*"b*|T`o zA(/XTU;,1aڴod౷ Qu&\'/rL ܴ署&W (22z>dE͢-2P(챎R=(E5^<ֲ$*\h)C'2S46s:؝+ RtfxNR[-C'1*ɻJ-iBp͕=%:X_3(13O-:޿vrfҼJA($ To) Hj}-\-=];qAӀ@Dހ(Qx zڔNwi(\0?B1. 
^ҧ<`NXKɨ8g>8QWyܒ ?0"s_Q3_D3[`*3^Emڵ3ΓMNLS YA p<p'EfL mǯna GC!qX>P@mLNۘhC#tri4RSQgklzVqM 4]ρR)qץ}U*Mg~|ڋr@/1V w WojTwڰYrۡoܺ]rOpj4(xl `N 6d5~ZN_ބOGP/K(xRA(‚*7UP,Dߐ7;uL([>s%g{;+yoUFUvQ}_R۸R[&+ڑbj\m="99i/Jy|1rwT\Pط%5Z25dz50bTb\oUl>įĥk+P* wb/m*}|Eq˖S]Wjw,ʭ_^dWHxp<T4bCҟ67I{q|qwXY˕f un1&i]#ƛ[?ŨyY:[i1x /-E硃0ێ>Ê0ⶄA"fLMu0ފxԷG55.0m@f| K_R/Fc|oK'/@_ڔX jNMt=b*dz_(~Z0S WrrzIƯ"yM&q:^Zt`+>3'ScF:7,Ela|1*uKr)S.E]k6;:]<3hlO-R$TZsG0ID(U%FxL̉im~=&~hi}Ψ 1¼a+~@߶_w{__=]CAj@i36!j49] =k+IY G,φudNx2a =xP5/ >O36by+KNCӏ}.kԼWiLe9Ϥ"ڊtm0{wwww~_]^^i걒RF0eVF!Fc!5 TIv¹ew]{5Yl1.P>e&\Ym,$2%juܠ>`si{l>PKFi6pkW$}=Sڼ=Tߠ"_*6zC{Ee[FkԈK84!aKZ7UhhϿGy[dǁ]5}|`~ i4`=&|s`3&Ӌ9_83**4 /i O3ß$\ 全2 qhdcj_D=Juh'e{X~ܸBI1zhgfxzO^ǎdeO##粆?U3S~[Ӻ qT '!B03=U8s7 Y9ūZ MoRVՎ}".B__t * yEo qVQHc:w U(\"B 5.ba-\0.tc' 2_JA+CGj_ON㻿[Hm28:ȡԠ-f }:w(_}WNYwA?`1.i]iȼϻ hhQBub '̼rwۘxLbԻ%@W9B.1wMۏJ*}mF}hmmנJzn»oSI^I+v׿%) zF 1 dn2V|){`: 6K^=m5i(k!^ZO1G1Q["N[eg_ꅨG% eRޱy Nϴ`t^m9YHL#\*STZ׳EqY'<VŁ'ўPQy#jeIjTZi~o,[QAyy"JS)4`n;5{I'['5 |kne߃%Y@yj o'o\ے& pM2EƮ밚5W?"vlL*}QߓKtWҷ~GLX3;MT'K/H?_3⌸"6+Ĥׂ5рooIL4IOۖ|H *$]^#l%L'z?7Ts_J%1 4}y̯쀅͚e' @ XPZTo1GƸ{F04@U?j؎Ǖd~&.H)Av#~1sb,)RodeiX x}<^U8ߌPKoaI9IDtVw^?CuH0K TDiq6 @u*=:Z1 ()+NF^Gk?#`{tY,ĕNs)9(%VUEJŽn6L5~u&wC@Z.4kQV4w]|}V@ vXohT *_OzQ?LCmr@[E%+}3NJV!XM̉ Cr,jH[ҝ @ 5񲫜.M/6o[6]]n\-;Q-_!/&wEB[Ro#Բ򍥖nFN7NPeH̦8W)N~2W9㢍b¦ =E?hyTk#7ůXk';G>EرX~wx%Q\Kg2KߜJ͹eh~Zղ9P3JgY*+| PaX9䖘) '{ ZT\_(|?PįY>cz;]TqГL ?@ ӌ_-Y: (z_KZ64LW̋XWZYg39 ^QT>[]G=|Q VsXpk>iWAMsATGVE%sŧq2ͣ|"%^>5k:SF' mѸE Wį$+{ -7 ROThnzL*Me~Û QԱbs=>z*#_n;l~L'H M5v֩"Glwzhs?½#[,B`YT;(ebM@NrqƺqJCz]{H9)BWOhPxsܜ Kx~}>جCa@Hέv| HY9!rQnrV >,ƬD=|.Ғa 6fбÄ v"P/>ڡ>:']ؠ> ,L TQ^vb`įJ:dϤz+73:~Ӿsb٦[K0i1k äx9ETJ]lȥ2VV?{ON͉ Mf!xv9`x76Tbf[wj^59Pdn(;-^wo V)N~COr Hl d"sn&l?UV@X@P!GVh7~dT{X ͐og69yۻewwbh+Q?kykHն}|_-&X9V_xJ7gQF;Jh]z[Ol߁)UBњZ~ Vxj[@<鿶fn >yj7Wo8S" S]"t %7!4.?|go 8.`k0o$#/5YtvDaVBtB7*OPMoSFC!\yPyI m)u4BᚋR~Ū2C"F`4dWB`m%j$l'7́G lEqLQF>%5,%,a]6@fn62ZCBN<23LY=( ͽz-}{;|fJAaʵw'a#UmT| N(6IB$KkPBDf;b4\JV S~k(=>TYz茡HZ|Oh荊SRE2y KiYIHUVp5&~ NrHo' jqm-aʎ ^ `x%.\Ek76{Ap6= P=3,?u1{,43|!X/>f=YfU,^Q$X~owQ JVRvW ?n9>_pq{e|"EM+z ƞMtp];z#jj(hƄ 6k;O W$A@햋*&d 5i-ע|nׅ|X0g?lK-[`˙jj 7̪ae&htVDzoT@%X%&B#p ũƒTc$Y{im RN*'4:j)[i@W"4Ud6{&T5ׁ2w4lRi*a;NXad1"@}rfqR<⡯LaAsH,OPicɏКM+Z{4΄v酀5ew:2Tob\ݪLiGqL 7dI-4~Uj|ÍE;N7R?im;m~{.%;~/^t5R'䖃4)2a(J3 ‰\ayc\|ΑpoD&?xZ?:ǛU|n释4FܟF1 ?v\%!Ak׀;ҺvfDizކJ=7L]#a%|=%dTWb;p3x-2U,r+ZtqXn`Hc 8WI]_;:,&HDy/AO.P?GT45meC}޹6 mM}5CB]qjrFZ95nWm1, g\ojS@{V&4\87dᖋc}n EQ=gi;;a%:ym2O]124dBN@,J(wB6@^ڲNnsR9ۍ(K蹌Q$Q=;-{ ;ľP^FO^lƢf0b(3E\yEp4ґJ}dT*m^@\x툊 5NZBE{9*.N6Oo΃7ӳfh;P<"O@DюًXs 헸`iH]!deVccKBvh>GD11mrWRUM)޾QɒDDڋs%ڝ#C=~J_If}PTmba~HuNMn L>AsQr\ϝHɅ /?,7:) Q={瓫;g5N.]߇PzG=(u)ڿ-` 720p]R6&՗ (d_'Shht7b~ؒwڇ93T/,_[,T7 h=gqeG_u#'; Qhn~Ж)UZRҷ*dw)ߜE#u|A~~qw_ xuzskK*MwN Pëg{תDȪr+yA#2`W @(ML@rn`_Nw*m׀F5>> P|{.#Ϗc;taܚu:S9?Ceuj6-w J,LgjTUŃ wq)SnIT/=Q8 Ep!J L{Lcrhi"C+XfUg_^|=Qq:Sf>è0ǫ#YM!#$Y{@|; 7:܁n.} nFxZC,4f0Ƹ]=rKgtO^esνb4D ( D`'+, c_8mCfp*;;$&}`;yQ|HFoN(Y -渋n[r1<Ԇ\$j]"ʼn`iԝ\}WI7ԍm4<=d]uc~徤8z.N}/AQE!S[' h#Eۙh-;z啡cT<īwR!8LUJF9ՒŘ%$]˟M_}ۤ=ϙU{, YknХB4qCjT@ꖠS{*$j4EaG)@*|g'aO,n9 9=P<10B@H{U\9s㌢Pl47qV!8}~AU) 8IP{#ms~LZ0(p~sX橄cQHwNHT8ChJTٛ/P73ck]*}xlQ\K8eU=59cvz*/ #9V`[~lV ׽`^<-gka<1j(5뫛c>l|_Ye7Z }PGY[ɣAr?)=< E-glf2[T"rC/?'iG.ȠfVZwf 4uY(4o8(8;e w鏙layQ.I `dJ,'n8+><< yZƂdž[&w9T-@z-s48A5*`kE<"hq UM6~N.p^F+A nhtve,wql1GU{ !pelcMɠ@)D?焂*&j $n=]| _#{d\s-R<Վ]>p8p31WH$ȅx;j`Wu72y+f_Dp=z1CVVzP"_GB>7i_J4;O̚rh=q,;* #o3F .vA P D B푚$x^ee֮>l}^S]QR^Wh[zJ6Y3o|.C~[ R5P&)8 ~h/| |G;OH;Ky(o 0]DVEٕD^9j,╅ԟupu&Jrt (~ mzј~i up}P*K%30 B]9G۱o0 .̵֟kUi#-r1t"*%Rgħ)2/Rϳg!Yѿ۞.kD7S\|VM`' XQmHT5#ؐ>V +eZhe?ߙk%4YbI㑭;2ttspqzv7A6+s0 rxM}Wdڻy2`Hq0hQQ_a-׵uMU)DٛХޝ,H< 
ң!S{Td0䴝7ckjR sߏc)eZUKȑ";4R$լ+'WוIyry$xĖb_:E'Zn|eC٬kvYLµ5ufTTxZL_76+X.I}bD ?y1`uՊ[~1-ZolHh.PBW-Iɛaā1YQM{\v`q; k,InRQV,AmH17q=(0X8nxۻw*#Y Sʜ  o0Hf+8\1rL;lIQVſEpyAg7_rꋭ,uMMAL9ӯ.EJl`w|^mV.-"冬җԫ`+M}߻X5?Nʇ|1Al jҩJ Ʃi02FW:q2UЂR+vzp[Awygukk˾ޖpQf#S.u)s|={=S/N. e`-yxNfh (xLlPFR =xwiS$|i4A-Dg*r|}9Y7lg"a/xy{ڭ64n# sEaeb@10`MNAP #U@5]/y[%xvCkq]ipW}lɃm PӒ@GgwDֺ}Dvme\$$XN9&_tZџ7lJz%YpUj;W "JGkT 6eXJuH#^dL@{n*d\ C<ɮ fno\UC8#Tns^ONW%>`<+6zE(I *exOiC0ݬ@ 4o>^MͶupbT`b9OkЉFMϜ~9WT:M!lc{}^s~v?H`G wdamd1VRfz7 xvt4%K\Z6М ֆIؗ5̓|S|F iPI0GCӺm<ٯRݟF{]r ij_I (*jX ~QŰԗdհZv] gV^ _=&ZqS\wK1kH2qi |LLg@#G1bciP L R/e+)y]@4dL{{2'C'B' sK-c#0KXSj:pI@Z|c&'Ney{fxZdW'/Ӗ 4lo,jJ>/_‹9j7gyrFBdV R?o,kuJr .iWg7w,OxYW\~9(+"DyH`O%kid{Z'wN/BQ8d+4ه|$_.M7Mѥ&|hәi 90#]Nf;|7:[s;~.ո4xq(+!C:$ܥAB1r}{kn5q px@$y}PBQ0Ohsw}Tb%%t)Fōd߬n0&oDfP-|㒓ajF(RUAB@2=mnIs>}lgYeռe{\,@4w4}'ORL"eҜx2>l+Y$2_tr/hAFm0nA9R#<гO3LFec<lW(>~QL`8d if{Bã-P7:kR-7ȾG !U123TQ}pK0 )>m؇s edv%\YD!=K~;[eI!& ?uk!MӇ/VXY"Ou.Ɋ DY/#Z+Yv; &hH xw 14-z7i;6$0}kJ^_O .T=S ҦsMCd yHfls%^ёx#_#I}:UxrV jw -os  Zx3CJd pwo^x BW5( ewC~tgyaZm%|M 4$YpWt4u t<2Ш8~RRI_S9 G6*;p^ (ǦX2Co*T{ \V) ^ĽƃQAD ( A$!.IAGDn-0Y*tgMk>x)48%o\\x _D81Nce/?$/0Tu7th2b9~]R#O:!B{3 >e)Y_aZ>օUvsJB)r7H (fI^%Ο_{A<ﶲwh»R=7qFl@yj_^cBp"w# 3W%KZ%_{h#Jr 4aŤ|СfGf rc-\nvQ. +H՜hU AGԳܗC.^SL0#xD0dYZ( 0 ,2FNyɬ˪R^ v! 5mZ1Z脈 0F@2U"fDsY7 ~*,F Pa&TB 81Rة /^< ׆8`$<~jt]X0{^Ex(l)>X* `ץtZF[d^-Pt[9m+]Qn߭zT}#./6 c?%'1E"[B68Ez>ۀI[:2>stblPļ > s*Nqcmccղ#l6 9dꎲ"V% /Y &K_c JI1R&~krݒGA$@lK{ʙjN-t8)b|u\HwTϿH޹~qCRJG #D¡~ abe nꛗ:9 -5V+4b [+An"aT WjBrR!Qu jh[{_I+!+ԮS.LRfe&rSfz8,k9?"+Qba}t. f,(Nh,Q~YPd\m֝IԢr:,,^ ΟMvh3\ёSm< ?0 fkAK閐UD6JAQB'Y A Фc)`S~u25i.(~h3KYʯ Fwp3e}aVڴȅ8;nHWFPhdP'܄`Οoz%~aZREN1_Y)wW֦ba!jW;04P b$G5(=tG;yZ  oTJGL 6Mlْ]_ܯrSsB)P::/DӔpHh4w\//q ́c׽7-WI/:'aGk{R̮ Dѣ_XM"|M@d؏jJ|g] WōFp\+8_ޯ?Xp~U9_l'<\l>STĎ&qtx,2Z5TrW(2ƽV)և2 'ߧ'rۋxCVHKHzXq_i{znu[X' F-p G eC'Go׽8s:n9p2>GMG 1ycU? ;n[NP90O Dথ>ڮBq())J4xFHdY4TKu~>G>:G!nWy(iw0`@%ʛCgҞI6pǙsFGwkڤEC Js/0' -WCWWY'UEC-;9UK{AKf!~s>xAUZbtQunh #" U :R'4à6/Dhߘ}eL([#uYI㣗hRݥ@I*M@xHL$)fqH{Fmыˬw'+"gљА]#EG5>>gP3AL3Xv*]#d o:Y^ ljM(΄$?mqDu|Nbak/^QFUQpBA{j3/s /9evV4^?QB:p?ⱥq,WZ"MۡhPHI*v4Q9k$r w 8qoAQf":t8[)᪣YFN(}_b"_&׷g<4p85"o[;O&#mU ۘ 2P}sUL|BTPͣ\dB:DF ̀VyV(,lW#YD9ؙD 0Kh :)p#[N|Δ~8T1C'\?턣΢5Nkg|Ar&4Q؝Ca*8z'Jp:Nr|ÄBbs aĵ4ܾJ9BFIjB䝰a Vl@/rގ"/6Cy}ka>O2S|e‰LzAӡ`)7=^kfyr0cIa;~}*:l՞uțixmWlA%D9(}i.A@M4'A} u@{mkԽKܩaLoĿ`$qm cA7GwZv88rO4dA}CqAϽYÁDmY2JG#݁}z5@DH1d`T*#&AiPek y+5~{ gDXUǫ,i?FE }JZĸєX("2eQ)O3X?MZ . b_rv%|8ag2EW(lB @<_* /*SvePRWu??%N3G/PBjzvb~:>fF솑j(^ nnne0̸ =Ҕn oNJTŅzo&v,dr`:(oFc# :L&81@D 4r\[x 7}A5&7 }e iVMɿ})hV CroMϜc=?) 
j)H.1k6 O+0ҨR_,5_Ppu-(8?`J}U3#!Z)@E_پ ^"-;xvHUq'KwQ.[gKuM| dzrņgQ" :ʎ vE-tlQ?Gt fW lQ"'43:עyOz{,wj* B DCVT譬tQrբmn!T9AaWԙխxX^c!qc-G1A"jZ2t9Y0:uɆ )֒6«8T_hWJUќg])JJg!7j ϧpM5Ip%fCkuhLͥuJE3~dr 9:L.a .߼|j#^#)`a7ZUƛ*uT MΏTW6bs]Fgք5wILFgXG#K7>()Sr1X+\}Aƍ7L"Ue!-_=nx1|FdlAjWvvM+RNgyr7|Nh1Hƭ5؟*23\u@<))ْa="tJiq)̀Gvf'!j :7: v.դ8|sx}PcF{zgTh}꤄S, 8QIR{MǷQKɦ>'WEP;9d#ʾ Pg\bRF'JM9]!(0=`+ʈFqɦaJ0Lx5#!Lr{]SD& wq"!?&٦rU[9e8?~Q$N=$4Ѳ#p;'sduY}sVɧ.~dG&U]Ϗd%?:1U2 #S1%A.84DL(սRCl~`xo.jw=^A!· PU#0a9 [D 5E>ْf޲$A;u;M&=1`7{?a}?Y}o2ϋ :AԛlH"<Y xe$€}@夁<y!0ugh mJB {j`)2t6 TW_MI樗7݁Ӿ JȲJf͍qJIUC[Djdi,3^ xk \v劻FX_L؍fOpjlĴ_oѾM8?n@MxxO vO`ѧ|TkH1| &KC:gstT(/]ݜo9m}PE#Ky7u*Cq`-;A>(c<&aA-C ~߂PLjoLx`әlOh&?5mG)Z?=f9aKZd:a1l_#M ~d#a-7fRzz]V)pּ@Bc$t =֤76P>rR;>trT)ye-$~=gg0Ms_RԚcWMBgC/!1!58ۂ5.bS}=qfeWbZU7ApfD،T2jp!$E c(; {nI5/hth5-#x_7We< 3ˍ!Zl>%7.[-K7;n'^䐀#/XՒzyMo*7qXS?&%QmfsA'7( {u dL6K}O+g`f)ջ162?3˜dCxlhV0 X%x_Pl>$$ Ո Y;ԄDb*)Šmi !G 'S/E#<8,#K ۆatbS!}h.j&/3SaBʎqDsz,IQ95C'+,c+ʷ=ci>YB>]*ykpw[fe}0?Oc``Mbvh92(/_^"q #ה,Nb!Hc*Y< ݈>sae6zǥN=q+-lJ,zhGh|&cR{zKHfbژ& P>kx w.<-cA  F`,UsTcxsje{'ldVBYRe|#%%Wか?W)wGsDF"8"Qb@\uUpƸUr;PZ1h Ih͝7 m IZDΌ{ ofŕyȬ%,ŷX*)z2$h9evG'6\۝I]RzΉq%e ב ܒl19 ƽ  N7(`2– ?l{ӘY]a/kq0^$kСRvCX֧zXg穂!1ӵs:{ éDؕ^% c ̅:E񬑑p'ې~K% ILevp8ǔ`RE.M =fh4|8AA?&G战M0NkGry=-2 3 S[6 d4 :_Vf>5qh,{0"d GmYK͚E>i0RR% W'8|wU߁u=]AN ǁmq(Ѱi2VY.ҏT;Ikڞo~f12]6 I vxe3uʙ$G!dClD'm㱱07a‚C俈WJ^ؿ_iao/;sv(\Ko#K{KEp1Ճr^zD&Ӌ:Y͖dL(uӆ D%Cv-vԎQhõl׋`tybv#"LBMRv\o VS*yhQecwcn&oꪷ&‘ղbGAaS+oXZƻC z'*C3<ʾa.,E%*H#}©ֲxb a$%1c\b1ـuA\jV9yH࣫.{WfX38 ʷ 2NI}͹e.KgLnE͸m=@5!gi&![?~yn0Z ֜]9OwCب nlCc"}x4ԴroS}' F:v+4fUm2HnhK ֕TAw Uf-y$}7=SAYHGIM~ qb{NdwHh{τ>{EO0l s _NmE C/6V]jW.`0<5]z"ebELN+Xp[hnjAWq:+8ȁc6awǰ:3/ V|'[v{ýM+u|4@Գ-@$cɚmE# 3x^[Yg8&(x5}$266+h7Q^TUV`76CA-\2c/]鰓 ,CATұfS|RA@C , Ŷ nazԆ.QлϳG]gv:o@~|Dt\%H*U {VɍQ tNc.IJ'p~"U5Xw>`ƶʦ"jFm0_isK7/&S$iCt=apUA[ K3d<@`7lA))FuC糘샶~ǐWw^\ceN%: YM19lX K3prMQ5!Ώ q"+A_ľ6GJ>'L}HǶy,|] fx0۽gCF"D遮VIl$8B*f7DP6[z?xwfC0M.ZsY Q؟}C8|ub9[I$o ש~ԓKr=7ƜH"(< 9<s]XGp(۶Jjvß~Sm:LTQ#u$/?=qz͋]lqzt#啇[2Jǔu C3 32/D NR}˪zu t@9ZܘCJ&lCQKϟm,$u: KSU}XЈT~@ Z-J꘤bX$CZ 9m?/ӽ@J@T d@>K3àh,I-F8=ppJ!?,%+sRȼ"+wP< (p K.>z/*?# /HtCLFq~>٧%@ 5fydg {`'}vs'?]#0x2dΑ ܉C9ɕU)LFeCO$5'O Xy5y3 Gnf%Esve^X^)?1ò!({ng#oVX6A'[5_">iY 9ul#ߟe9W*ʷ;ϴ*d&/^6~b8Xp~eB'5A}W4wj*jD41ߠ݊AkA@[h5xPWW@u;%ADzC0l?d scelQE\Pb4od]_ ٞ؏&,i6G&ZD\cITD Z#}Jo6Gk[Ɨײv!`?C7B7+A>Y%f3C"Nڅ# R!9sFiMrfPQ[>kߙRV|Hr$DgGIh,Ϳ&<ŒT-PwOy}1[LK䴗zF|"> Fvb=j&ȖJl,֠}eE͵6Gkm*h7#մ$ٞNJqDP冬L3iʩ/@e9 WUФJRgS[8? 94?4KB+bwCS+|ΏCGCpl]4h^3C8숺ZP?;ᰲ'zS.E̿Ljã"];١L{-{qhLX ~xT=I+/O,-?>[^C o"-7B+*5QW%ĉlWlH^7۬Ǹ3F)q_7I`T8S=~xpRbc{Rڕ(K#tESGvr7vt-n7Ex+-,9F!YyR|C]-\Ie[8թSH uwII^O 97 u8{AQq_#=*+r³O9H^ﭦ"jߕ-D: (2ܨpQl+<;B^把 d=0}y72}=H"RYb-["^u)<#I0"8q+^ 0#dք =_OAs4@ 's'Oy1c2$z%So^Kל&J{^A?sz2lm9Z {ѹ$څ7Q5[`p/Ì%m˧D*9v^ZHD zbxY>MzTMlbUU@ "Q;G CU7}(H '$9(v2;o< 64m#$s!>=RNh:><ˬCYz$?A`r_wϏdxtMK71s٥+1[F?9SZe<p!r{XJ$v:H,Ǽ4I#X°}=̭kfpxajMFy1P9a)]z*Iw\qbxPfb W4Ss#=O3/fִz#}"L/15F'#}sΧ~}>FUZl)-Ihy.lۢ$eNpȌ"Ϫ J!{S?p擕;c$:gj1Su36V`NG1.1{3b*cHJC~I7U9/ ihw @\ y:#d{rO)uq+a_y+ +a]M@0O੫ET )ޭe)h3͟5MMi'_j}ޙDۤ`X;&A'귰oBUbU}8x;֐%X\ ^yO~i;umP{u^? %{t6Omz[d3EvڇA&wB k$뻺v8j { R5#UPKf *h;AeXU)|R`oDi6f,}JuAjپ%I?'qUidbaSz1Qaje-b7lHXYFuFrKi-~?UMa$#y#0mX4eR bf 1SIKHFK(it|gBUykW1}`63(otQ'V#0ֻs7~!f+thTڏ`xg>H))ZOr\gMYlOj)\G)J^ n{:?:>zD{2y +rʵTl±Y|eXZI)댩So}NSGOTxMK_C=r[Vn+cxn!_,zRasbp5bp1ly{B5w±lQ܈_q AG'2Rt0Gsy*ă?)'w)M/`TwEʪplm4p]^sSlP\啺FYM i qN"p/b0N{bw׷aPU`'/V፶ɭ֝pQ`U!o^HAq'/V$VCA%=Oh~LrKBYO Xnw)5 }6z>DE)^ X٤  Dǧ\7/ y pN8> ٮ8q&9:ʄ=(L\Rjutz=vI2!IG/6;oD~/;Im'F+зymk/h&0>xjV}p,P OTMzk+=紆mGHg0#r}y#c2ŚsRcaőKVG_(fPN!0=7m5:QQ% =Wѷr9eWst(:Ԟ9miVG+F.|tq*Hۘ1l<-5N}6?^ {+~Z. 
Ħ ^FʹFW<'$V}gW.ֵsFܸR cරo9LV9L1wkWLPgPmt?;&_Xt΀l{&6Mm $X8I,(zk/zGܞNk \?WA(U)'c FNQ/VEPoNuLGa (э8e.WONEBð Q6x1&H>|llǘ5iNSQ I6ZĘs7JJE:N_} 㬌pGA!esP,33ypNA*b2po[zD!曇C砈(Qt} ayAL1>c*Mι2[LqΉ4n|aѼ˼+kwmuO|"qMamw-z~ (ǨsI2OS∆X.4 2cpc @@fiq'= V<:?l:nr;F4Nj%VXՋ`pmGӐXl ;ڧ蝼݁bUZ*٫u4!hts%FNl8$64yRG/8Q;]ה~B?"5qKJDˉox/ 5GƍbÖ]#u{$m WKK]YV: =*ߗ#%4iR uz ;G7ZLX I"Π+wled(KpaHT!Txe i;7ϠD!XbɍB.˻iQ;rM?@2)Օ/Ie|8mT>blAi~Պ A/-h!tgt3y(7Dacd|ʑBjCֲ6TIxkv+6U!4ZwM?E2>6n 9ͫ]D~4v8;ʘ"w˂ܨX-UM& e2^| /7荁L2.hL%%h=̝Bz5{EJԪŏ L_lTwFDžF]&;`H=ylO#a jN(~j[<,=ooHołKX]`,u7Qr0կ彽rL;cxuKDR!&橏,p\e4F!$=kK>WLB }.OL^Z],mZSBТL^edJc :j\í׶Z{=E\*Af|nOֳS3 )!"L2\?ymƒUN;K (%T"g" 2湪ԫsoeDi &`q6U^ϙ(og)L{F=\[nۚ>=Rzp2Y#sUk,_Jݺ) BOL`#= ^Z.!Jˆ۞\"rg@l7땖(^V`"S똝n8/w[ؾh_}V^8{y?M*ٴLťv׼(:I+Od{2R6ZyvMlOr,ZBģa8]06YtJ҉_1ė9tB*UjJO,9:}x,0bmxղoN򷧫iAm~[^.=tPG".e$;^>kc >@{+v URW4>E>w)|"՘Iq=!ro\ȽLcPWo7?mm}q~Y-X]VBTSf1 W)0|ohb&.7˳\G/px4g%gɫGjRmȄ c"|΀O@礹Ez*kȫe4,gi|uVj9bpG?DoMx;px"VL_pPg]rxOXz#c*Ҧ5J6bhȁ{42,Y".| 3u9)zd'Nb_U[tBkg|g;lN^q9ElY:F$8cib$]A[Z# }?#֓[Kmp;}yhii NƏ /t9R&sܫ/`=ZF?X>ѝ/y^B4H_YTOD`2f0Jd-w ^#X`4v6ancڊ9o r?gxGsjPzը:N%:uU`&Hmb-^}šu⢛Wy|7gʥ%o6I[K`)p.PPR 4sP e`tjec V$;~nhy;b [&tɱabƞK9Vn~.F%BuտGL"[yB ;h+f~4b}RR]!5#Mmi eNR|Γm-GPj/> tĜG3Ot><#ȓY 3[hZ@\͎:Qq-$^"OmNj1c ؐZ7-1Yy7޵kCEx?^Rvl6.BisPtaC8E~$˧LF07Vt[|/*&3fƑ Nqiڎe[V>&5Ve/ w7  rt4䫟ݏgHZls>H Epx߃yOBհ-Nubt"*9d7jsWkcQxKw㞿z-^g'`beJH_,oRIţjǻRջ)1H]l.{;}ÿXAD0}8P 1cc$aC"IktO)>Clv"KObZ )ċ:9~vu`he:c@' ȶ GpJ9~Lvhxx>vC$<7OVNUKUIRk]d?{01))n;e)fz\}qg I"+ZqŪ>語%uWu[< ?|?9oƪ\``:kL:d7eN[t({Dw{kٟͬQ5<:^v"5"QUuXubZ::ārF1PieJ){rt>xh}P.-+  ܉sCIph ^}w ;J`z҇xUJSi\MD98.'Mjuq (=1t!wh])̠=3Rq͋Bt0u廰9^p5(:}RU6y$D:Ï6dm6^#w7!+j8sc㒿s(O2d_Y,ɶC4Wy3EA^h@_y(nŒ@*d5ث5YgX7|Ss7uh FC!EfʌI31'pqݕvP[i;}-eT\\+zDuT[3Bx׿qMX^A84ػA8֯W0i1t TH)_6J?7x:6R^u6,#xY{ yؒzA)(h[,|R)(w,χ hCe )D6fRYbDDHblf"Mw#e 0'ɗRgd hh5_C׵jGp!=Fѓ@~v* Er۷OPuZ&@3{[ DCqbM3%Mj/[]Oo?Y~-~ e¹HbwD@8 v# ?ͷ.QN3 +k G3>#Gܑ ,0쩘;pϥv=Qms̨ﱛ1s枼ɤxBxӎc-ѣ)9έ^@}֣"-[|h*X[+2S3:v|yb!"f{?o' =(,vdDA''AIhބ< ·DA{QA` hIMVZyyGێiMws7 )0F=gpA)#vJW+cpa#M: ^D+GICvI[ÓC3[՘( #Op`]tc|ZǡE@2%[dm*{)#G}UyبɱCqA DDYi$4.ՙjfR) ':*1e*2S$~|I24:=֨eqiinfP:&;gثDpd@pyQFG,Z!`̥K0PQ|n9-qw!; %΢yJ\$P%:n8yu,PǺRqɹz$P:1쪽Z,#ϑ;¦oGДOY[qFc#bPNϖ|ݳzqz {w܆E.%԰ at*WOܵ R^)^J{d5ŷUkVBG p;5IWޢbczFDH3:}R2|X,oW Us`[Ї>}`>$'( B~ߴ65C>Un!@be#(sܔUYl="HmNQUCfh<|vBz^OSǞɋR/K~9NgӰs0k%{KQP&δxIre8l,z,=1#If__ErK .Gx%ӏ {la 08Y4mfybJٺf{%Ks4i3b,t#ItLP&p2!C+ f_^Af'R [eG)a)cBڞUj迬Z!\CCC&J:E`'ښ'ڧ~zk}@USl4[ᤊ K\.V|c K$cM (2`g{<<'zyz([rQS!XB[qi6#= buw¡wbJ䘴@fR9e跶qf9!rρǀN.8şPJXh_a[ à@1uJݕ}+Q'$O;W|g `s_$f^@|fۯpR{-2JZ6z',pR jWW* Q*C{v£"_C9bV/>ߥ 8ͲJ]Im*eW?oy~j' OE>W-;4!]g$,)&swd"77QK HGCI-8o^zM H%:rZ\nd l5W8uUJA^{xmT~& D= ڰkk<a}cf.rZPȣp+l+<(!>͟Y!ߺd$"D D˕^á587dD7J>!Ha,4wC9It@sr^ڔ0/>*oc 9IW~헏YK]?F|5Drk$6²O6405pMкĔL3Nտ3gq gvå6kՠFC}A2`r7^zc8O>k+<[6zC0+̉HXPf?^s7&sԠ g׈; ci4_< U*4G43m V3A>fC^-(I2& 4):_}}eZ?\v߸68n8 =8ͮ ")rC"Ci+f'٩c;Jkh<a'D-+ g9>jJXHr>k v6W:t84 QtDC%"R(uŢ[K=yXg~_$!c2#΄oؔ[m,&jtz0!{d5ڔ[#)ɿVYӌ.wpiwb?ہDbgʶP{dY7K{J[ON)@ kK); _ ̂U`4:гȫ8LJl% [.5]Q mo+J/$]:'0 e TURg;.߂(0qpS?k{U콼}!'*OoyܤNz6FH:Q $̩۫MށpQek*QƢFjG.·f:'05 . V6ќ^.KmB/tPb?'+*D_Ët_s`Uz'3Ji}H I\M/ JMz}vo<Ȥ\' ʯҚ8PRvKKG mGݟ}R\c͓{r"gȗ! 
=iZynm{M@3r!=ZeL7ʌNЈ6h;1S~pi j< \ds?ӵT{l (XI%_6wirEoHps N',޳)B/yyz /Fo jHփE25ǿ.M^KOOBT i^@M+NIV#Kԁr)cü3j#T2Z)[P bg;Z CxJk֕^gco&ӕZ)B9gRl^R̽w`]GpXK8ҚA!eߕf,At\9'E ,@qGsİ i1ĸNܧ*c]ڑT4)]n*硑 m صwoZA_uaRT(NtnȄ0\,.Mt iÝ5{#/!bM7D͜ [Íkv֜p.ִ53 p/K >yASzGϚz}=uq;EK*h۱* !:BUke uZuݑFI #z^ FܓJikC4b9ׅ^S"MtgX֜EFqērV ~K!Nw(Ӻɲ\ lkg&?2?kBz"]EMm%smv7Hh *lTFO7ߊ*i#/^2 85[Q!XG p.#W3{ N}wwsFDi:WbqBHAq{źQuIjkE\'άf(DKu12*r$' >(f%jAſ֖hxBB]73xhmuwr[0 ?rnۂԮ%[q8gV" {L{M37TPa HC空8zȫ߅Q~'vԆc9M_۞Gc^o&x(`Mv>ղK[6F%\6Ͱ  a@$$2x1m/B#y%x3v]܁QSK_.FFJB ԭ2Hs5 5n!H~ 5ˑHEg/AtrϵFFpNEv|saOUh|ଇ(eR,YzbHIFܫf.ןbnqgF .FOr AσՃ>0n #zŽ| Cv?5JjtnʏCq簡ǖ`"y*p'mV+8(8uͳ֌~m|R {[B62Si!&n'3\]!K|_ Ǟp]s]XJO,&#xgK\?м 5qy6:`c޳tҧekaÜ]BsÛ sew@Akh) R`Tb[Dy.f?EiB=}2ԕl;4h~FN'byeVy߲q נ^j0th1INg$|w"h p` .CڽR=5q_ AAW.] cS=D%A︕r!7Q[0"!*]ѽ#?pkrҤ])ޜpEE6P+ qDX ѓdY:]H]yn2Fu#u)á颒Vb NSx6)>ީ&~Dd-4介.gBb@Dk/@α {-b}fw^sZJY,|(ӗ댠'NSCXbuE) Mtg$1rr^43W90NarJ"E?3ܭhzR =FҀ۹T]T%Hg7(3| YDŀ|BA1hS4qYNCZd [fyroZg~Ϛkb^V>x,~+ÊR\: еhOR! ߭\C_ D$KX3 ĘLws}_O$2'G xqh^C/C6~O5Ƿk-.N<5nVMíxi\YϯFbƩusoc&vh|Ն[l(JceߓWA#Z!Sa+K[Qg+& #pc"w!{&%81ClX=o|ẞ+g(_ H-8dk =,u)PF :8\+j"d(4gbkFFu&vM?b?O&'AaqM5[>{׫:[5ܷR9Z9 cohƪb#]7d*J%FXCZ"nxp=Kª,bXKCZq#;bn턯;RCyDt|6> _sGjtvP䳓L0Rir;jl0xvMW10'6?SV|HQ*a‡"g1m7S?ܞg!{e"3&ێظ~](\5V!,1T͋ƥxF}0Wz,~̌.#DHq.oq<>Ҭ r\Oj(Ҳe ?Q 0"H, {ٔdxPhSjRL fS*ʫ|]m,/Fa]kMإuaSU#T;F?o2s[*s ᘶ,:;W ~XCbҕzLH8ꖓ6מ#Ow ޾opr%Eepo;׋e,q/|v@ېpC~`^\ ,f*8cH1l"1&SFn$sy3w;ls_z5(#DI#I,_[Xo_r ȉZ8tӿf4ؿ^L?0{ÓX3:q`G)4V-,0oT`[_Zaop)`í6 Wޙ!kq%ć a6 mVUQ^eAmOx6pVL9a*P Ek?E0D\-_C6uK{Kً6;K8!p#3*^eRr>Yۋ]Aily)dl)3gKN8_-ww2E/Vv0yU׭oE@c7ciάTJFY c{q c2=Yo;$ `eLîO#zq{{ mm(7VJ݊g2KPu@?? S"GCPYxÖT@A{/maYGZiaqR* oW0zk5k_!n,'@w_kmځnڌE7A༠ qᲢ"}^? &콌0$#{\a[(Q0w4t8HiHNT9vaE?Ftsswò1ܤ7<;;or*MA~FXy`g Ҭ7G,캖3ZXw#Cc"P[Zί빻P;!.YٔF~Dϲ]rXrydfi+RK=rsdF0{M,B(&^ss-!tC:p{=aA|!&fTXkض_^z]oΫ.(N5PP[[`TJǵZ4˒ȲW/47ȬGu R /tT[O4EW{k;:W~vdžyʃڷLFAL庍TږU4i)g@'#oa+'nw?35}' !wUKH)/:ӯj1*;)9mVâǩ]΋4 ar%uTōī/`Yࠛ}Ee :߶hsvh_ C\dv9+Gɽ t~;[/BP͎ÀMqc-I:Z깞oɐ[* r.̺zqM4k4uaN>gKcKF}g2/}{ Y|U6 B*)6x^3LΊEǃ6^;5|`9j#r:۰'@w/75+m֣M!8}t1ȸGgYٽ|7FFw*$ VFB1<R$l؆LjIE&E@ltJB`O*:?.OpWYfU_dhh?bMT4kQxJR%c19x ie-Y<,:v<@@c dg+ɨⲳ.;)%ҍ%CG/Z(9ϋCx. [0‰kHJ6rIe@` \R.U3K_/ ɼit;熖G:wHҞ)*j@`r5FƐH4lw0 콤e\dH9asD0hⴝ S3blPXs* O6\[U{(d5o;yּWvk5!Fl4MduyRDG8&p3^R1zudS=XIC9ȍmMH%mg>v08|TD}n2]~_I01:w tµ-j{:`l7U=Z0#V&9*ç싵Om5<#4v%x6`Rf&Fͣ$ ~HRυMvs* /|~jorcc $Fb-[ՐJ}ܬ R}6y`cVlH&.e>1: 9 mҝ3%J)+$F@ 8Tz8f2,Wq5}e=̈́Fz u\G9Z3A;5WV`Q^ ߟ狻n3AEHqm, .c/aMg [ǻ ݾ_hM_CR=Ko!YH%J82pK[=`aRN4`f0B i/mqU`D%bH:FFoU!:֏xm,yguٴQKn !g(%`{?dxcslM!c[U0Od6w>4M9ČF*愹v!DB7h#cL>o˄/,`(Yo@ICq=a(bFWéf")ZH~]ٝTY%2 ?RHbRA<^ }d~4@5u`>af39HStB ]V bdhq_=sp{]T.|?>4,cӆ~ ^$|E kNm3i Ӱ_5ȰΎ%o2P.@/6WiG..::ş;_hcZev[]&Bl_EP\ݠ{[ 1!+YV;\~4g4!&x*lKbQB s@/}/ոq%M=֤GZ-u ,FڱڥnO J_"s([qsk\4sRW^c{/9a]ZidGHM[6c{fKNF?,-K2zۣ!f\AO HB/pԎ6~ /rgymS}ƺjo}@[%BQ.=! e^n.5n E1%1߸h a7ψ-NU! y0t#0ˏ_TMF%\pp@.!s^s7u ggQz%(qc9v[)"_z^+# Ĩj ̱8 xE6a/pdjZ2D>9hе(yil]=ZBj( ymު?A0*oz3O!vo.PxzF)]2HU|4@ڒPZq|]E %kruA[mCA8pǛ ȗ>G0a QCe Ub]6J|/ Л"7@U@Z>Jmx=-<*^+GN49J23mE44I˙$H2jw0ο#״휝mi\.Z7a;[4fmb,7b-ݬJi\,4Y!(hP)$;1*sXzL^/ߥC゛^VdMD'̄!$? }?G% %be֣ԚybIoKV`5rV 7=3HnTjAƺ. !F+e|zP.gV7^MuQ?4[(hnO[&b3v%6κm&*a rI˴lPEIڶ#3}-cfkY/\MFǍa8P\ddq(o #!)jul0!EkJROPp < f49Ց:rq]M -10-y7 e^`_dfJeU]niQ3A98ָWyT]3L3lʅA! "m}", %FA:Ղ f%H-}zߓ$RU mk1+x\ 2װ;^9 (ӿ!Q#Dx]jSp(qoN6,wln5j'*'IJ]D.By _=e9?.Go@;wE /dΔqso E!6uX^$aƫq^=L3_| LAHco5y KVoIJeJ~PBpu;f?b uAZzʁ~v]WLS )zүE> tZ`ZV&M,iA>J筟 R3ӈl5*VT82yKҹct,tC u:iU?}N{ʯѬF5m|lIKf2;E鍧Q. 
!u"Oj!;D˼ON_{*E8a٨X݌a/CBq^Xuxx !h|ܴ 'hz*z׮7yz4Eu*@@w^BDҭ)xn#tW۽cK6z)"v~?J2mv~Zخʰ;ƗFÁh,tj-C!D5VݡEC;zRv+ 孈 3ØB5_{ɵ:٥EZ¬+L!dQ+ؔD@'Д]QyrPCC-c'6K]w_CۭT^բQE/}hni> 8>u%z3?ɇi(9ߚGYua=/?COEkm9fU2gy=|, 6ian^ GD,*WswX2*7n!uIY6;"8T^& E,SL0y%f]3a׏' ͺ/\a݃L̎B9dZSOB {S)&%<~gvU 3+6x<5kk\.ųTR<Sb c@&BnİW kH.+n|oRֿ,Nuv9^25R 0 ,2 Gx.2 y5S =}3k';`:D;QV|k8զ<!sKFh*Jo,ht.g  us> f)) /-e \ #_vA\4 }a6Sۢ>@'{$TAiWWSc̒ʡ[wL ŬO50˱[jxqv|v߃7eqmUp((`1j!iwL0 5d$$>#$ &-VxSKןj:Uu:H'b~딙.e-(?~UO{acpoQQt"YsC?a!YW,>=#qM2 0xNO_AdrqNWοj|UfcfLbRmNB4LEbO [D:s4 鴓"Rg #7:"RσKzZMLn[ y#mz*ڢj Ϡ{WZ蹘74`5aeX-Bxg;Py/cԦ~Wz&UNW J"#!*C `>F8t޾ H1{q[!MA5&#E3wX 80q$,/=BZz'0o/(c (|g bƠ= Rr',Ԧj1tm4Dz-H<{ׅ<KA{9$.#|d6l6z+#x"I~H"9@-@Oo߃F&MT;@ܷ" i> ! C$.aI_+CG(z6iؗh=^BdH"Jp@@,Tfyhմ3d~|Wݜ >-;=VVa_S8=clZS:'ђbɶ@ NW'i?d*jCXmm륳S0=+ųO|Ooϗf<jX;(&UÁǫd'3[2Bn_.5AihKP1;7)"[Q+;J5AR[yPI&K Wvq ۷U֟sVu: o6W_o#o@kceҬ`dL6  Of-nXml0:Nw3(>~,Z9뙓n;cɐzʯ W?ռHO)/\^M뿤PIk(LY=ڨKS趜G#뿸Wng.id^a)E5Iw@P a{3.QhzBUtiiyKD}# oX7ySl \68`/4js48W8;!Rdvaژظh^6.Jx4ub-z>>OۧG-@k`얔)\'Ƹu֢gVsc`&i^u:V2|Y#wNJOrPtm[GM}(To]>:0 ^7tլߪhXK UԄɧFJZ,UZnuõS^=ߘ7im3%bfw\ )?@n.BK}";5E~wۏ.>?2v9pl h(L)+ fbR׶|]1 i}5ZΟpXJv"-98l*^=kL}u. 5HZ'?Rl 5kWU5t[ݭuBqRnN鶔'WBe×OwSkn9aH8g*}дFF厗Q IP)KPx姇k7VjΒηjq\H8z52"<6ɞ ;q=ꪆi\}v2[GxT[sc`ڨ2&X:/*r?>]GV=P>Y-]PogQ PM [$Sۖqs~xv !y t*\wG?G@^{]Ѽs0ˡxEp>F3Zz#~ao\Ik^L!b'cT=oaZudr7ns7),cEghyK3Ej鿡HkhZEk9M7f~Lb"}Fq~L'6^f2~ .Yү%2롃,Yz#6:=hr69_n79\{(MPC ֙PcЄX8;L+sr;yk$aRJ6yh0kt殴q%OyVJ|N3 8h(J]DKY5LvOΥAϿhE祵,ܤ$0E @ ÐީDͷK33N ?吝n#/wMɌ]{P歘j°bϚ9:_Z:f4hj㇟592?k[N"v\CJyXl uQÞ @&8},DUIm"ؑB̓gn>8ni9xֻթՌ^ :Y۸b `0VPt{Yh\=0{P$3хf=-T~x;"".@EV7.b.MŀuKltAdVl̥SyޡxbM7)w}Sm!]!,wZ !-3)!N3?A{S?~IQxO.WWAoJR9Vt5˟cknž Z]&g_ylX{͡+dO]&%mU#g@Q꜅v}ۇr4nyK^[֮-ylb1LOWjx*^9"}@bFCd}HjhhO} xsY3%%*rmxi"1c+?cQS>szqPT߳SK#yҿ[.*@v[AR-}?wImUsP>a;Q򯋍W 0Kw-^oLM 'C绿/9/hҨz!/Y!T! #eұ:,][}g}dFs"Y ]\1ᰖ;F0)`R:Qһ,IVXcBi+&dԳ{^Q_'\MTmu5`|LTc=9WrLKnGhQ^}Fd@ A hJ-t.k8l>ZOߥ @RX"G(<,MΖ6//Qe_W `Yizۉ+O?uB }C9(3?FLR{u9SO&vPD_{Kj|SMcP!/x?g} SJE%ق|VvŠ+6p2KL(' mU>Ԍmj;T}A~w+>ݦY6yS)_~.=}:;{"gQ6O(aJ(/`G8@#y;@[M;N tDٱV 1́ H%,{k4LO㎷ ]Љ+UGִm vw !π2x qؘvk;tZ,ᨿ1zhN=2Ywlr=d!oV/&*qI{)[C+uÚHv&&(^ Jk6Vl҉FfymXRc5F0R;ZD_1Pdk]? 1噴|ntGKKE2 vM4[t9@ћUD1^:]pVHP?u#7rLtpH_2%'#F/ȸ$';I!ʛ&jpWB_1dȢs gxwZm,ʃc3'+̘Ѿj,Hp$& >Ѷfp6i.VXQP?<)^6ߜyrhTh ],m~I,6I[]Zأ-07ZguG+ Ah?N%s*CZBiĪض:ZuZR:,5Frs>Xʃ*^cNjD-П3MxA4 &mi. }v}AVӟ㳻WDŒdA+( E]hZT6S#䣳cx+q+>F^y>%aH'V&sx[ hy䳗֜guKq0 _(S'Sټ`MN+EEѮ^qqrT?"#yj`FƘgSe$Mta=: ="v${ Cd:8Z$jT%y0_`QxɃ||O.4iʟTlCqRݠI*Eu2 #cDiQ.s;fsqNnq9=i~@j<^=>- /'iyhdV2N<xՙ^[.߅DZ{ΫKBy]MC},+Y'76W[3JOǮ()وKs쮁1OP{oY㪋YH93@2g0wj<|4F2IJn4V+wL"M+vrY)oE0$ RD>1D M@۪maؼ.uʾ]^Yd$< -.ֻ`sUê{*!M5ˌþustVSZ8V_w=?t+V}q*I V zruwd !jb*w_DIx^NgQ# PZԶWdNٮ|^|!d]$\`B"$@O<$XHio~>4XS‘akkcBcu$Gr \LUi?ā]rWlkm>a@+GvGTH1_bE֞@˒!Kz;\2 ́3HvCM("1xr>ݳAFgamхAؗ_s/J mC^lB]F $Am1^սLx0~Ncsu܅B1֕Ԁ`T2={K߽GgZ]!t`fNDн7')=o;jH9(l5:$okH^z p&ER Ý3D[Abh:i< !k%T _6U+*3>:V8r`a;Zզ}j^N&9AL\Idnj~~ [=XB} VkM" &xϭQo}b*o;&iHldXE @2@`Ȁ""-,Xpf᫨߫kmx 񐩇lOmd^2d(K|$}5UK< cUYbۨq=$-5 @nhk#|J#y]iyc~DCmM}֦b,CIUmvkjakh|y JշQO kkVGy3ߧҚKn׏\ *颱&ѭ4WyFd3E6m9&&oZMnt>.J}aOanf}sNf U~sFOG&%HsXeLjѱxUm3B"s,ǝ~/} JYuћG{O*Se_c4/Ǯg|Pwn*{;r:d<x65E 8z©i Ġj%3{93RbDabkVɤj \\bڌ@H:J2KX@9KzTnb㒝!`'։':" [GpL9E_F6Cȝ1c~ءPs"}㏲䴞@1{+Zֺ'tkiK>j R餪AhίT%r p'>7,hW8yÝeC=3WŊG뿤F"-rdwCn6(?b~Cҷz5j6xEEŃ4zݑ>ăB.(C{ +=gyk5~v::nfѭ|~1x!30`DD" nRӯ= js_h,DvFbi "Ed *3Dv~RQerxŸ\(Z͚RҢ#hl(ӭ:V#Ό4[Nt%L_Xx\{ݙ#M4F H1;mƓ}y眅4bM ޔ%Z0n9 QƤj t=e"ӔYAym#zi1=j|o.;&߿A=6 ?4RM,46UJ<`- ?Ђ`Ɔd;L`;U%r\46CWy,1@'Fƌ}"g:8Uz s>0SX9k'cũPdkLT64V,&5/7+ ocn9e ! 
lm?==x t23D*3ݼT m +II 5TʅƣOLt-qK]aOQxFB:6j%LPv3 &q֔uv0; eB\t=g+τS,N7ZUu8U>Зq: ;'5˰_iSPlY9F`xqe~b4&d':!9ȒA$>]l03a&!7c nue_=zCH7,8矺~X|A6GWNRYh%H6-E?|vܨG{^A:|HqV,\㕰qaf ؤ^ RP,{"O̟wKReѯ)zآVs` פ,uIzBA4zRX%.u:3D-񹜦%TlFլ,O39ZES ͙#@@13N$12l`\5ךn0KzQ6[!DMD^K2>1 vyڵЊŀ9D%(|_@auJyD?C>&pyBQףjZR',398w'G4h'4MNUO!.eN|%C4؆pzڬtq^;M a!|Ǫ}6MMB+4bv.gP슧I|tpo:dw&ᘖP"6E#2g3;7:ג|#~rBb9L|$ɓX8RNg ߬E k#ѳ8x9E3zꉃqF0X96ZQid>r\:{B{ pInN+ X/@O;J߯_;>ﮫMҗ%;X)Q#z&^vVɿG3Q܇8ﳆ/K/-v~-o*nߚ @CKABs{z}ժ'˯?n>;bbc:Xo<ױt4Qg}FkIюH2(!4&6@oXp03,DĞmybkG~I;Vט^5gmC{VRD0mVH{O`Ppǣ3-qꦯ5VDpX%r>IR\>eP(]1L(ֺ#E<:`;KQK7S^P0xF[Ie6uL"X\?9YBa#ui#äC቗Yebb P=[Ԟ6jsi9"fcd\sp&$B80T$pVy߃ާذĄJ\JQbsRjzN3$>HCF<伪‡@)D\"0dhP:xYς'hԁ<gzvSDmxt+l-06EupQ5gЅod -Uߐ]P8\4ON>c)DZ `e5a+n2TyV{m>wQ47 ucjUy_ǘhyդCD.C=l,?˫{9 Bp!(yսnjm$=8:O`亮9o6YSA-~lrZ[{_sϓ_t7#a%mY `5Ѐc(bi2/8 h4ʙȘκd#yuh܁9 D_|:U( ӵQe[Q(hZݥV֧^h\cpɘ20 fImHN%pv6LjjΊNp0O ֟2;ƈL^P[3/?|i`Q 1;lޫUQ3Zo!=" zQD@c<YC,.-MiÝEqnz4L#ͪ{++aGdIKj%CuM_>ӑSSE;k |w[vC`jOeu u*;+[gQt ٪ͦq**kr+f\I3`m [|+=ܽV(Vˡ<]+0crQ>{WJ/?L`z^J8щMvHF {s!x>4AnС$/>xv4%ZP0rw{ /CG ^ :^2`Co .LY 'mpҺQR9nW}vg@aZ &~S+t>AznoZhho{a@8 )ִƏqdB9]*,t8skۢcԙ ^h+HA|f@aGb K'qJPbΉ.͌ZXX^9M//k0ׇ5.+-0PqVii X! ;EE6p:e/qicela ?!&mfh 2NY\trAQZ>cЕ~;*Hk1BIJ1kL3Hɗ9E Q6l!*">q>dcw0QH $FF%%^]a]3Cժp8[z07tMv-,]ǮdOk3Bf5/N!h>E=ݧ/4ʓ~J  īT|p%i;êJM+xJ]ERe93*̮RpL9)1=Zև~|]dˢ X/T{jD-t[(J|zw=J^ѢBړd z܏_,Tܮ cv37uM|A ?x;9O|fd +8t2 #`@m| |)Cj~a upU⿣o0 i@D [Ӏ8?_B:i;*V|\҇ =Ly{S/R On_9W~iؓM)IY0+s %U՟][dt_c2&^A]:6rx!ئpN+6iuܺcq0(WT ]S>6zl s%oG)D:G=EsLBh<{iuhW ZjKњ6qGNTPk0eCv C@K *x(i<3tUqWWXL! +w?gtX~}Iw/WVW? (Փ(w2c (ujR*f3k֯.8ږLsI g^ ]_9]Ҁ-.K6o|!{.;sD.Il8ba{Hn戩PS-wl n@ti!B* Ȗg3IЊge%5QP+f)ӗ=v# &-n 5cPaU SzT`5g@vh~CvIZWoq>T@>I^cw࿖]GvݽtL,,0dAgkf 3$Ic_Gq;cg=fuqխ } ͮE'v!lKsX=$8FLc)I< oG-u/ vAp/$Y\W[ 5I!ɀPcsdPiۋ ~Xʬc ~O/"S?&BI;:TwCHLJh4B*}?5sxK`ygN)/ruCJu?y}&; zn<}myFӦޮMi62.S'SVM!BԈb'L1GDϙf\ ҤhHap?Iz5j8*N*CjCI wh,xwvꍝ|(Dº#="6+ P:<u'*:!{;rd%uw9"M3M}.C[ұ"|N୧r^--&ZE{窣?4"EL=mh6}.ڛ$̓ۺz,xŋ7ܷ=bYųHbN9C@~:EX"PQ]kӫH&{eԀL `$\ y<Xf W(+! 2ЉBrNA *YYPSVB૎[ @ EuJ¹a\f\WO>gGJSY$L'M-S4.D)@Z/:c`y :"MZW0q\E}gW+~s d5d Fv/I(5 LeNRNc81X t9Ӆ!$ݛg;RljoG42*n tE_˵b!S\@ I -y:wK1mW1 At4^B?!$RQԫ%HW CwU`KQ]vUz;gʜ''FH7u5eI<&;kWWSWpz@Ws?ըlJHAuᠡ=C[Y%/*CWDų\9=w ŝ "3&w\hnW}O'-v1YI9s1 fp,|C{?KdH}>zdy2(h' FC7I,6k nB,7{(v{r@ 7(2NY,!=R=9֛2=ɤy!mJXQ>`P@o61:h(MTiam ,LL`1aIROf, QK)q\$f* ~E(aB0FoØ8F'Q!B A! Et xL{" ܍/AN)@BaK@UF]!=}m_aCWr5$lU,ZO_v{5ӐEri*nnN1TەNEx `UsnJL5Nrλ=H9A:'$%Wմ12REW/Lxyw'zFnqS(Wo:!yL^? _.6 0\ϟKr`Rnvp\4P3J~i q/Q9zS\Ѽ%ұfm,KygRI͆VFFI~q9fl8 .m:1xuLvL}] BddwL~5 ]n19׎[eڈ#KTpxv/b2R=g_#rlO _6'ȱ %$GtsB4m&]J2^69F b M7T p!WvjNh:}VKxzFX5: C7e>bbFIuy_y; -N#r<БnWc dpe"xT^}66F83XT4=e֔-Pix2aff:l%A!,Pmadߚbk3_!)c6YmBR̐iRi Ipfe0Vyc PXE  gۘaA {sҝ>aļ#3pr*dfY$!g G_8j55Vɂ 9ò@;/<%=Y{RF_~G:~:Bktɡ!!+F WV2m8N6+BnPWYg:N/+?|Fί[%dYgG+ef͎`l>z Ūtjgڬڷ*k2^ӵ@@ S P%7T@󉔘Lɖ?ўmKqʢ7poպSm.++Vi8o Fb2QAK ]gȃy Te)a8n@х5wlt ¶'Ss*'su_0R`:;af6A V/M;.#\hW nBr@NWBVԗW$&* U6/ d^M7y[3wj\SWcLѴEW(_m=)^+[emn$vfvyDVePv3PƖvׅte[n]"c7C29 n+AH,dԄ{?u@qUW@Y8N֢Hlˑ" POqN%8 eGUpP@ ~qST+Pܒ(m8۞&G n⹀0E\ uC ;aRw9lDGP$I+ 0F xА[}W}͕qSLo)/z&& Y*6ehAR b֑빇'AP^}oay0~A;A?X]]]]t(v!DӋ慠uO۱]" @(Gy2Y48ÀRUt%JR (r1uS(p*ڀ$Iil*ƠP iQ*`  JY}cof& :B91bH0X*4UO8T [E< Rm4Ҥ 0VϦa_Cf}.Z{i{y@"zN5Q!eM-cEKgu =9f4sgw]TSMV^(R oOx%}w'^|)ACSi=L)<ĞM2bM4 Qhe?$biIML$Sd! &MhDҟjyz&ѐhdɡ D'PFd zhh4@CC@@ 1BRH@IO4OOSM h4̦CCA @@@jh44F$ɓFcSi驣M4 &OS@iMƠ?Rd4hhiFdG`$D&M@M2iM'120S4TOi='"ii=O)'2=oTSGP =Cj4i@g?Q3#X# OLʼnKULK![*-a`EȖmb3sr[,[jdVԫVT! 
KR]~K ]+E  B)ZqኘT0,T `j.3­ۅ C Ca 1b4b,4h,  &iF$`c 601`|T0U݌hlDH6a1& ``h`X  aĘi# a07`h0 nZ0 ClV[[UlF1W<-ѕRRࡄX VU`XiU [:!lZAШxe[ 5W0]Ts wTdWH=7^IiE\vNIp0,Oի`U5U*ү}#|U^bGz S4.d|0<#=UawX,Zx _˫p K:`߇}$ wRNWx;k~;w#u}pI56ދ!;[`6÷JF |jd8J4VeemˠQԹ-B[RI5H$5*E 0S* < iGaw m!UG~X, "dq8i %J VhVZB %H>),*BP,$$Bpy^&1Yh7acJmXHP R'v0K#0@H0DA, Ahr%i_i乖 noփO b%C$04ItAs˨K,+,2s;;1 )$ bʄ*hҵ$K# @)h0uo$:ky}j'eFf¬ $bc; +00v LbŅX,T1-UZ q2ԭjFY7 R%X •=- İ,F1Š1#L b1F#a`b0 &#Kc01&01L F#F# &01h Ah00000000 fAUK), D*@ A"ǔѡBXBDC!*J |`y>RJ}L0c0ɌQ%)AfH X$Yaa^S`74ha:# )UP^KA}r$>X*.h,U_VF|HD@$I& bUޫlUY_yzҽxmO:@ҽpavC<hgmZ>m{>U# ҩl4̯=3j'aW]dRʟ~0K^ ʇ.0 PKmC%nYa[8.j08A L%ZWJxA΅X @d@ ܸsap84+E^vᡔ5T臞*U$IfXI#Bn7 yI*1}[zXa~;!Vؘ=@FXK%2b&Ix^Iy*<%b?p'wW/#P(yKDS$ '`CH< C: xAX9йVF;8',P_8<@t;ݓ' 6fc"AP-0^ xhwxߺjv!th| ؼ`aχñf?A8vJ fkߡh7y!CjAۆ=waD>|0R{C59Wn6;qjBs!BN'M:h8ʗ @=oC\4D0xz!.xb@y߅އ*:e yxIt ``E-R5a|!y˴>/I܃I ZzRV͓ 1eChъVV2JEbpNd2nU̫ @(c*ʝ]0x%]TWC]GzL"/|<&^ظAK!.b$ $HC90jXRKƑu\<^p|dR~U^zXtz}5xW]X {YeMUf3&_4U.']K9`:vnrbU/^VKx/eh17R%Kj*܏>8ڧsUzaoJ=4Y&+$kvxt*W2wC䜅ȒkuAuӎsGYy wd: NEur^lE_QiF# 6}2}QnVKSH9ΗsU(;*0fYD 9YBH%ASݔ)?'Am@$@8Gpt F@a2A@D, &zܧfiDdƠ @D&qcWW' ) ʮi`i,W]Z ^E?P<-:ʵV釪#ԫ 0% %m|C``{H˒YCH;OC!~P0, 2qLgmWU:Йe .TGJ/)H/ޅAvAzЭ#?Eԏ;''H|!{0viH$WZ9>*熁QL0ճPe6K,I(L !,&1CÕ8T Ad, Vh] $-p{0j }B(fXc>4- [ ZOuUȽnCy%S$I-V0^V 1L`1CZYy%W./+t#C*>ymh1H!xګ%O~;$}nC6CdCRPڡʄ۰`^x Abt A1Zn[NӇ}0[pC<7`td4A\=PACq 5p ~i RQ ody6 bN@M3d|DTVG\9u(E~Uzt/:%[-. XhL Doq(1eE,2K%SnRNm9sF i0䑰I0]$B LXm@L((p¢;0ᒨ4'F˼懝|ھ/2TF(“0& {1=Ǵ:k!Ы yC*ڮ WR̺ V 3a.ӡK ӣo敼޹׹|l߮p| XlC@ ں,`5ֲw϶އ2aCaPt)pРsi4RIRIZ7'2N).Z t}gx  C(Cim:kЏP; [pXq$)'$_!a;봀;PI4ZT$u Ǒ +h&'\LНjKI? 1Z}uAه/RMBTcv Л1dBl'iz (d %Y`/W P? |p0{/ue]tk/5\ <2dՅc6I>ē$W}N(CT2 BbI>쪒}XTa 2I@*I1J|a65|uh8x#Ns'0M;C*?兀qr4>I?lVpFQ(UW:MOn'B0?uRJ&# > 0^}%pmB띑 /ޯdK7G 웆W%8GjW;Jshs#> ZqpF"jjGXuCv:6_?:VQWܑs*dn  ^YB̒̓@\P]G5~^G#4]껚&1ǮCD%c$= %ZO C,N.O@rtSe_FUvaawhL$0.a#"Wm^J>x:#zU#z0.dmp6֐ f=zITH( \*=GsTpU窸jUW5w'=\WݫV5]à>O ^{DZ\䍗url3ҭ=$H6FH\sW#p\9~"UwCU_:hmWp?nUpU˕_t9Uy>XV=0yGjJzWUrs:W\;*5^ H>} `^T8ҵl6^F&amjIYd l>C$ ?MT^P^;J{ xr}0YVJW$;uOQeI(<4A~rAy.`7AQlF|a84ph4peUwF_09vG>\{AG#L40`cޏ9x18 ܋Ƿho5 ,4>`o86X6Z:am0!#p6"шnL,J$J2`jP 00b`LbXcLU%Ub&VcKLFJ )*XUX0[4*DDl |a$bak0`i[nbhe `4Х,) $T 0H,(zb< \A- #($r3X%)` I 'â8Cϓθ$D f`!Z$N Gq`iTY`* Q_ 5!7 OLw£ 0;tMEr3)5 S{H+0# I0XVUeXA*`N!&I ?G5ã c\ _h`f(Fw" 1PTTC, z 7OLj0b)a%3wmA"A!{O$VKX$s >S)#5iZH0[1E\ST zs$̕-1%1HB AD@'ޯΙf!!H$|Pda< ~T@(io_p;"BhB1+CxtfHbiSȧPRP#BjyOP d E$b$-$b9UL=s--6G@շ(Ԅ+  2 ")#&$ kcBAOwd8 E)Dqh Ϲ"NsqD'DADKdnnw"+YLaa:45R+l$k=0xt+Fم=%eS6::;BAXЀX08ȆHc 6A\4tײYlpLse=if )5XQ6 𲍲 4FM=Z{ eOZH凉#c;FV Hk= +j(H|>8[*"zi)VR  }Q3_PXf, f3솃DNRIͪ }uVPUU9CEU`?iQ߫cbDS U*d&,u[jJ{WBླ9\X_/q>$ b0Lx=; NTػdσ=pBRU䨠 (( ((nOc(9N&1n&k0* *M, g@4`lzM0c v VԵZX9MͩV:7F11DG&R]QUUUUUzK7:H+Av4h~#!w`d@HoS)+)q_<.^2RhQo h ,V$oJט~LI+ PK.NtN\Вyr֊J;PM뙲gDqq\v1{/LZ<|d}҄w3{ΘaǗnTŻz*N &[n~<#0M:M$+GGA+iJo߈])'8$C:WX 79҆1S:[:8f)K)B4Z-ȲB3̨Cv\E6$7aQzH'nͯs9_Ѩwo/MzA#]=w7}vv#fmp[F9JV8MvWe{}SjSwWNæ;zݫqH!]}=2ٝ1ճSӦtg6p|lih8 v)޴nwNzeM֝1)^oo#qNezوy9s8:N=;:;t]}T鶜x_wq2<6mL_4B%.z[}oqt{Õntc~r|1-5JmXNs߱륫{e]p7Ww,;6MsF1ЄE>rS<_ݻTTr=V A\Rc"WR[esP3)J::~߁] 5\\rdd_.+w+W}Ys⩄tI\&>sG"zV{鎄BHM0 #Uei42SZ<7epC͊ǣ&}tm0jH_aAPanbcfc&Ч Tb~!qLA{g?~)nOYO,r`d!$G{+:DAzʽK;ovc]NkZyIgb#CXP B0"M9wYM%n7i֯8e|"|A~[8Bݡ$W~O<^\$m2NkCyUw,6qRv7?5<WqEUȌ b_Jo7r 5dH X v蕚OeC`wGmYMq :du8 Kc=nj ?OpnPyng*315S& (|JE,b\B1nZE UHȼvlИ:xGqg8MۖDjPHI7>r{4ǹ uS}}]3k$mŹ`ΐHGig/|s;^xajح b\gG[6k }9z{Syէ{΍w=Qyձx3yWOy6n=o(HkA6I"%Xȥ{8 +_WbeoQ|=j3{ٳNn)S*WϼOtH`f`LJda0 *O?0vm4 fKXGv;RÆ(!*o!ϓQV-D2dba4jҕdĬXd ~fn. 
2Q2!F[S_YZx|j@kLiRQIM,ʙy9@Qǒᑽb͖Z[[Q{.%׋+ J6F2+ >VcXYrzKcIdtۤ<|٩aЕ绀,]{j{F (̹GG;2~< S313jٔ3 #{Y(V*BuBW8)!Nuشa|xP$i= @vhbbѦA1Zh)BM);)&- G*REAEײ N}>#m^u9:^/ 5ņ}v .ё/5$Kq^'z, BvͷԱ Zei^P-Mbh7@{31R``Ul@׼P[ ^Gڏv"  Jz0lќ:6B 쐚ȁyx] Ť~rwʞY׏wzZ}4g!e.#vȪxQW4bek^\E 8ꃁ=7.qɣh,R8:{< đNp0C8lѾ†4Ӈz݃RBRPjXE"LI RTJ lb;c;VI!Bni{ZzTQ7 H3YdND #D]^H_A[I.T[Ҥ 61U~w7賂jom)MZ)E"D˛lJUFJiq% ߽=_nP=yR5y_.٣a3o 1" IA`Suh[Gw8DHLaTW?+^ҮZ$m;+lJޒVW0୩R*D&RPp̫fd`(a\F.HhPҁ@d0tH er P`] VTTK6IFdHͥjIJf2+ zRloOce'$yr/\gekE1Y|QX8D/̊ `& 3f6)6-1{"+46ڊC +Ru&0.%y0+¤%|ebT/h{LАZby;'A𭀉AD.bT2xiG3yQSiT˫,9IDU: e\.kuߟ|ucw~OG,b$0 - H ,D*s³%FQN.ąt,ŝƈ7[=~I QK$ܶ:}y.|QzF`qDJTKT1#jvG6V02#}lC[G!_3}N~f  LѸ{#[ alUUUUUUUUUUD@ ooOw6p4q xYeS7K)%=G:.t/08Shb.Q 6Z{mmqoCwh0f4x)L:}It\p!"(SCE ` hH|bUYU*JR@*(bj314zfy͏նt`7DbH@j}p 4ws^8o;6X`Fы?Rr3T{9pq&Ub!1/8p>WJ _RrpۆQt{yBHs9 7gC(KԼAc |ǀow|RAe)I!{8t?"L FjJ].Os朦AB4~P O_p/v<C?:{`=rAfшb(pQe^j3*ב.,b}ŋ}my;pLkff0,ax%X xbe9R-$L B'ROI&9y4;sMc~s`+tq׷x{ʉHI̗>XtOutnGQ$ 9<Q$@ CI\J@/^QyZgCs4:wnyUrLF\y!a$)ycmx!Cc(xNFGX`vߗ`P ύ~|Uz]=G1k#t쓯\dVS+|);[ ',T6_;^' $7`0 J ^(g78l[`=ffVYWZ~Jw|N a, *ʰ ̉&0@)2*T%ER1H % MERAy<^pQ%Jy;J =rDdr P҄{8"|hb`A߼z=)=ΗݞϞ:>^*zbD9M)y*%;Y[;EE^.:.e]To1z #]O1(*"(Su|׫UTUPzaqx^멡xi]C*LVWɧ^P}}-}j}B؏-./ao  +@1VQV bu_/8U-mWo 'Ƞ2gck.> \6d<2g4a=.+<2׵$d] LA㴟5/hyv^GQv$$ʔRp$$i(V5դ y 匩5lN ]g# &=>;x"}_K.1|zRE;{2﷢PٱfY p8x($ȫZ-{aeO(8H^ϛslB4Y;;&3Hc22LYk\az1P˓$fuNk3]KAҩR2R- tݿ%WRK2t$tCf)]*XrUw~'5djٻVO3vQחO ,tE/{ڢ?!8d0T+~8_]%)q{oxhQSؑilB.&{|ji ,-i```oU_*l[i0Ա2\)lԫؾnTR31G q@Cgl$AinWe;sjo")JγFD}Y1Q"䑤s q\l3Ō[)-XnK8Pd7h7hX0ȉdVem\ZbePrI%:2{( & q*fk -‘D` ҁpA4P4(N@pL*휅`2)%-)0ΤHLQS(aZq lIMZ( Eb]$ސ4!epf:YI!ұ#th!RmGVueREƓLIB ]T` !i6ϡSҬBm2&;/59e"HW>8<eNѕO)[ ;Ol.t)E+}a_4vCnb9sDid,Kfv*U$, a_ *A@%4RzY:D}~;;R_6<{|S,%;bPa3A$(G<zxq6ٟIx^>XP-aF /B9,(:EA^` ŀFbr8Tnp30CLB;ʘLcn>P0BE$g4al:Rpg304MXJ2k8/!la!jWwl].0m|=y+Wju_'XUIEl:c؞c}<=JOK弶xFRk"w>给<lXESt M?WK'9Wq~/|gZwc9s:Ma6DrT$荵$D41҆tԱ]Tf?&xXzت)JIBuV5Xg5Z3z.շM1Eo,J+*B-G9Ӝ,ēX}~~=/U.t 'kG]i(YLfwb%%">PɯˡiIq(*JEo+t[}ߞ7fLa<&Ί~ҫI7teL];?± &il^TPSf$%RP֖ؿ ʤ`(IG~fM(Hs+ DݒO|NNwʪwO=sRkPoʊ͞s,GX.ŢRNp/=, ט7zm +9Cd(WS:#o^˥oev\))N3:?;1 9)\K*f%rE2{AQe 1j_KP8(P;0C4C ub᮶K4'@t(߯Տ! rlІ-d*+g笢0r %z8Z#iWUz^h, 9.E], 99grS`0懿 - ؅Aslj8pxp1<4 ] a-?U_d4YULXX Pb@ӅtQ0| ֲ3TI5aI&B$KMaiBt : 8jVu@ߺPȫz­?0?i *дX,-? )"KD:0VXQI7rNB.E=vrz}g(?=-ZJYRp;\} ajCk  hbP[z9,pwV0؅7h,ӫT݁y؎-g`V`-cX aׇ x !d=w h2<烲|/`낁p:BX \ @bRpj~Oj?;Fcb3ϒi h% ASKiψy+ħM:%RYlˢx7IDc:[_3m=uUrcU̻ŏl;ggs&=CQ7T5l,Jy,Ӏk;,ХqD(Qؔ1N),?EҝpXX;HMQQ]ro & 1VTTdjСٗh5.)"ڦ2I(p)isAq@ڙ+%,ǹle`\tv::rȥ<^KI_& VhY 1U(c;⢌52ߛ{bTjMq;ECRiGD[*8TJ_Jb{(Z1V[{[NꚂ`/0z r;o)Ja5R@G(^ XYZTV&m5l mKnyrrCQo5. 
構س ..i-sٱ< {9⮬_sBy:[ o=Csj;Exa}06U1CC2~KQMٓ_9QhT694Fut$eјG]?[>!SͫXMD;a;pݰN=csLޔhU tmi$R*HLө:߶؛WHN\%j3S^a'LO u 7yNkg4)*5#7xaf sO% 몛s~;llM,-q\X-11Ⱥv*!T҄djiӣ]G]tmUTQU6TPKQJ$pGan\vG[EC¬L#ZyZQDD|z?77I?M-kZilM4M4M= 4M4M4fiiific=6lMgffyy34M4M//ooݟRE\Ƹ$|c*֪N਩\&deз%e'܊dK#c!lU?B'H|;bအVͣ ֖PdCVE& J!%PE"% ϡ~WKᰟRG&A2 kW#&M?RQm<'&vOO]^){ܓd|m=Ie=ݾomv0yqLPVIJ7?ٟo}2+k i 23aXUN0j sFlbhj+f @,iܘe6I ZF%5 &"IF[050Ŷa"hPr($mUcjP(Y6 dPv;MpS* PiR 6MoT/ j`Aid4ɦCFF%w_1а8~#j st onf+)_y+ $h?baU;~?w]>>oߠUy1Ӽpyڷ^Tyjtc>kr 7p>;gzoI;'_Ae>no?A8PEιgN^{Jui:l_?meWD='IͽgSh*=ypE=V\57}'zn6kܣY-oV 3 ,$[6HR%-F2eXIX01`FʶLKLXʞ~L$݊GUnEd) Iڔ_{^] _O v!:~{JW+(:NͣȤu]XfjGkFtQ*V"yH裧ƍUr#ԑԭȎ9Q#XZGQU~ G^JWd"k yVwZ̳ +FhQ?A\paxנ^3#jFRuWYjvק?hssT:tQ%8#ɟ ״cԾ%+;E:N%xn/vԾKףɆ#\(}c> _$}˸ծ=9ztFMK;޶ ^P,)YQ-#nwN]ׯIZD{: `Ǝ5#_5o_}<4ti7<ǯ_Ӥ|wi H^ ol0[c#n'^ۅs@(O޳{Zt~hs1'H!$hL0^%>Pk>W|&_IϤJyjWؘJ | YtX%k.Hwki7jؘL&M,iVU`j#JXI;hLQ2oF7b7CAfk+KH֦(F3V5լFeL&Ujɡ3H6GG ofTCX6ʢO8٢mF51t˝+d۔Y~ɒ~!-͵{z2bEJ]W3%zC{ݥlJOj{[÷;L74uTFR5Jk-T57|vfUE-B#u{C_ebW_e ݂.UZ Rx ygW_}rWI\̻:tPUw%q(}WqGɾe9m ++a-\]8$|&'oﲲW {6f}1ER0K$v7@ELd4p&#rbz"Dr@BWnݸ MDm" sJ:$|GYO? np?6a?{Ol}+~tjlJžF__#=#EuxNNU_1G{zo>d_E쳭l=?y@2Bte^|/jQzҕŒ "w f EF #332~o\ەnvץz<_< ei8]ٚYej - xۅuO%P>gNĜH4bڴ#rLъ ~)'h&˹F:takߊЩ9/\M"GkW|Au}׫黗kTb=]Z{AѶco36?>[I@"fh @Jx .pe[_x3 JBk@4۵g@4x;]k ցh>;=ۺ=޸, ".iU"X퐏p=YVh}߉Q~Ʈ꣭Y#k2M\_d¨.3b-hG`K{0k#X'xI @$ЭS|| Q\j,+T9S"ՕWCMdBR9A$C}/y }gX7^b }G{X??enǎ^`#R yO]V;5\2YCF#T-dtLF'.+I L $$ s6  X2V%wP, }?jߪ5?#򺿼>wQn{q7nֻ82мfB%ZЯuϜxnef{=s6eO嘸,9GY]3&Zq9q?yog{G ؗs$%WD$l oWW>!zI_+$Wwzwsx\ү.0 kPjDZTg @EuKݭ*h]DS, cb@(MHЗ#i_\r= o4_?#':pEe+q_p:R<MŇհ;Н3Ś=̫}F .{CXքGhMecc\ddbB{f֤XDsowC\SWq#7?lpu}??Rj8,9>hn570pEoy;s4{wQsuN.Ƕ2×+vk;mD(0DcDˉ훛ߑ,_O?5D$yrٵhBMI*8 0 $<)^ _ Hί<4E4E$E!bn 0%}+d:kaWl̬#;r\O߽-νiś>`G+~u[? \xOgCs:nn ~ntl'}-`Gqrؿt,uq>om~Ӡq}1ս!='Yd @2/+hGIq#c1;mVmh`" I전If\r:||\@o 0;`8mk~mNqh@r50t, YAQ<A@qqÌ33AyQY;/ƇƾM?\-8/MB/.]ג?4hxG"t&`|=e[ ax`~ppa@z,LׄIغO*Uv̤i#R,b50FvR71kLen58Ua6?BbOʴ̼aۣ?(׸|.َ|2kmfWkpWzܙV-OX;ZM4տ&F}`⇴ن7Dta>Y6$s0p/ޟp-L40~)F\Lܟv8`۝L0z~xڟʔd)UQjfmmU>""!DDDD5ӛ;6Ȱb2ʦ&RBVJJa"ʩ YadS UHYA`YC|U)wIXB4M41:\F+ejIYe2)IĵQFeG&Z& qalX6f Myh[)(SmN۳f]42f ̆2a!#[ebF l݄WNV>(m?2cLU۬9*[f' !d,:h 2uળ)#2YfRJRMn&RYMFMM0VSNM0XՕYac&Lp2bqfe֝xׁcZ3IhɌ.kh4%+rܲ]nqZ ˮj讋tnm5u' G][nuʍj0sX̟6+J_mxm2Xd3j.8bɌ1J32Z[MnܯR\鴳82Na͵Zʹܒ魛Vޥ>cvJ-m,2qCe5PHRQ9?ȴ6 JқkKyCX%ډ¹Xvkyjh5#Zָ֨\,ҒF7#lMm}cj-e3[1݄fPceGpL)wJ*)}WSW?^਻gYk> |Bxs|8 WZF ^d4DLi|C%%|JYIV eM;->nmE[W0DZJԢd]zs'w ?'\ϣ2(.%wo~G>1ŘIDQEDQEDQEDQEEDQEDQEFYu~MLf^I.{a)J]ח++ݱv7c+/ ᱍZ0QLXf8p谚a!E[5Pm?->t= &̺>WV30齨>~JΧ7 8N:B٢f2fzBX)X#(Cndrw)xVT0(ʕx (Ħ,JeCVD[2XLFC ى%# 0J+)PndQldٵVa,2Zd,bXҦIY,a&fYaցh!Qm,1@)jYn)YG/g6QY[mfYdDDDDE.[\ DV.:)J ˃!iK,bVeaVihŴJP̊P kcT"вhPeb EUIFoHģ`(!k%2^E,BWK&)zbdG2' XWBRCJ P> ]ZעҬ;u˛"7 ݕ QS"mGjߩ t0嬐UCg߽W>fCؕ?Ogj<8yˍP SmUTJy+re0RģUa5)JkJҭaaV ĬJĬJV2AY}VS| 2Y"5RIPQlY2c!,Ɍ D`7QVUfTie d5*Y -*Vb"Q,U4jhcXjF0IkQ 5bb֒ # k2h*VJ%S2ZZ`BF1%+aF!Ze)+Bqm-L18kUNW h -fS̋E%5Jʢj.eb@6V%%kܬ*yY!f12fC 檗6TMfF-(JFQJ,ŔfY.L+yk`Sf01#/H=BZjq8a0L86o=\N>`)\ “Ǩ|lo ľeq3Z3Viayu1mf4yfYa#lGm;"c`JbEw&.^dW=ދ:zcWcdGjĸ2֣lur=첷cw19+:Y# h@-ݬ;4i8joڣ'6؞KN6Ug()0q^DfJKRB]c|/A.*_%v2+1;u/:^ϔ}V*L1s/ַ*u3;1ekMAw:]®llU:vuUS]xUwvH]X'*űVwۼkؼ%\W}cLKU[{'Z%K]/M2CzxKK~"URu=UںxމWZݳU8N̫*༽˶읬#z.ҮzuKwEឃWyW, 0Y1xU;E;]d'A:zKMw2*1[Sqx]znwʱڻͦ wwHnpy/'R +#Q CQc%] 6A8ŴE}+jس`JųJaVѱcUit^ا{3 "rOO?x>.G]L}Ȅ¯Gl8B>YlzJ_.)FUDYֳ}) jxgL^.^Ӽ,w!y^?s,9EC<p8Rf4{Ew$ߒcU# :>3>RTs*!V_d^K7V9exa=p`xZzĶvn. 
K'wϟIw;'eGuw Q%kCi(nIʹ6b¨a V$#Ż^߸J9e'>З~zϷ=%)שݦn7Mfyi u7p`fM6Y$`'yOpW> yKײG#dz*pu )5iXeqcէIƱ%ؖ)h~yt6i6\\'%.θV{.oY#<\1}7QDZ-Z);)*Za'Vʻ_Q%t{# dǶ*^ _oYGÁW b,L+ XW#a/Q|<#S3v {:n֬,Xc'tǯo%wv m9S*ޣijN5UCẫ-%[Wkt}j}ؗ@NU rWhxJ1\LWhj߭[QejbcSl4ڮ3JF=y~oXt9bbs [T뗯M[&C缪+bP𾡱A&R4_~%zCQqaTL&cW_Z{ ^zgR轫,a7X117V&[-7ќNG&p0#&eً \tniٱgUes^<-.nx6]i_*^fC2:gv7qqqn 5gsNl9*c pJYFeffdd1FdEp6ao7L]UvW}=xH{%HPKMM(Ub*GN4FHd̫0233!皺6mJ[`bW4NilˑJ*0Wd'~±ƕ6S($NӖKX)021頓"NT$d{֘hfV -Ϝ{/|Њ[*UuӲ;Ϲ~}\xa)=ܦNw@FX#{Y`k֧|r:'|~=77Cgs7-ӡ`FTe1biRrr=b#)_[ aB|^TG+pu|"iJIƞɌfcߊU}.dp%T[?R]eyT:=9;ڥq\BG.ᆳD\ #^2c"$2NJec9Qx< R Y_mhwHxNj]+GpiKO  nƺԴ+J8lPKc,/aо ƶ6֔YIRfef(zTc+)㵊\<.d[1W').1%C{,>О]uIy[W^&zP'A.,.eQ[2)P⡩w*biKUl&Lpu_Qco|W3xiUd c4Nĸ-d TӸ- mv 9ʲ2LR}YUu;Th$p%b[<qK$2¡ı`W6#Wa;B߲NEHC QQzM./Ѵn'2ҁR)W]ݧx҇YCW2KaPRl<cTJ/MK J꧜غ]TjqC 6(xؕ¥З[S^ReT7ȓKZ%1 ]K1zɓK-U1𔺏Rm:i{p][v5p۱vWR ~ %]/mQ΂O'XʓyU=zs}Ō8*;aF#j%nUnn 0` 0`L0` 0`ZmMdԕIRXe.2榤i Jbzþx40_'݁aOKM:/\M䖨$ؠ>~jKÿJ䥛3$Җ˂M<˭⓰]d8Tc֮XDƄIС(vfP|g̞NqGBW݈kedeW \Yj[ZFԌF#=KeCg>zFTeFeiimYCucUcMa l[|&xppLi4=Cf·Cw..1W:W%[qtE'k:y8K'šY#FHČqA9*\T.pe\^.&G8 p<%ڷ0c FZS-iZZ|},ޜS0OqRu<&xilsr<%t@:eWCwE<w6I]|*wb\9-x1}h`X̾3gԾmn};F>*#p/yǫg}JgczUmp!WO'k0-vUu]yE<ʫ.R]r(R^x:l{Ui^.u ˷̑wW]E?{{)G%z t@%Q/P/h0'`KX1',lA3,j8ݫ2]VͷړmFksb8Fչ)]8Q*ڹ[q3+03Jd-ƙ,lf3`mf-XYb2pdl6бYb)1o֭&ZqocMXE#Y eon lݨ9QX֭8.-o7,cm!Ufkbkm"ֵk.M"lTҷ16UM&R̮V1eL1aMYaЍYe[իTՍkeE{!3tr0AXJ)h%\X3{h577&) [bT"a`bUic6֕TjU(UdLT1Y"݅p [RTȡNB XJC~eheebDId)UM-VHfHJUD+ XJŖa,͒ee ,KX2ʵX2eMFU"Re1L6( $ibb†RɈ,A""9_vJ!1n'w>W{?Yg1.i4\*%v("J6T\Nr'C^΢b*0rv3fZȋ\x~_]sxj2Bw7׉k9-n6틅E[ԷܴX5LiIE9?\JߴNbV#E1!k  zÑU<+M5f,>c;B`Z1FGV}/'Gu7υy7=wOg:9gzBi)VܜyYI<=;'s˷=l DW&_%lkRDb?9&1e51-imV?9iE Ti?,F~H珘 E5?ΘҘmnp6hcQߑ"1a>@YHEY'~awX˵F"#"H^>Ôqu/l6(>wgyl!O!R˩oBߌx47|l`|ty3^<("] T0"> #؄z#)H.Êpqߣb1Nk^1䜎s7%c>;KiM6p1lvvN7j.%q=3i]V{xm̀153`Td~Tu/O49{V+D{G)+߼^]`8wɻA|\_TFHxjFHXeܯ%$-4I^_iHGwKjGގ&Yrl o{ç5`ڕlwLx8{HKfo>##) D@&HNJq m6FkAj226Ln Y^8s1|GF3E9:c#g3aI a߇9t?1/vY/yaUJY+}Fڡ /(zQBST;A^uB ]fYo'-OtN44GcBuゲXUil@ nc<j͈/`q2__z~>[s*ny.jﱱc6>{mr_CHZdN3d4ߔ@솨8 xsO_ ӤōWlϼ*+XA 0rzFQCtmi6Kc)Hbp#N18@.O&,;{qV*0&ad~o,ORdUi˒k+msR2'yDωIVYّbfō Bl 0uo~(Λ,ɳgO]oHINڮV3:\]c7A8mdb18BH@ HyD9t~:N%& ZC8Q00ra@.H~ wȾF:nȈ^/sOȟ+FyWR@|˥ZyʊYe @S;+Uڅh8S(Um\hSNM SGC?YA"ИµUV,t*^)p881Fe5F˩qJsˌIƵsAm0g<~CdA= ??<5=s؄a̟;7)C}63QQb@C@B܊AHypG*9>7pprs~:5#"i,@|7Ԭyʣ#Q8*7>^hyn񘥰K{? ϙά4"?^&14yn-(7 3?U5HddB`@:hu0QCcs80lyЏ_|g_k ,^G̏E`?0ݐ@B2_%7&pd7}ޔ0^액LVM1h-q<# &xfa@)q}&~@V~TJb m?Y\TT4+ k}9\JFAn Y/C6W[-፠\S?xamZNc C' ~b#/@Å93a&hI۔BB lMef~ |9k4%[+H @W⭡@mPh`αȜxeB3z +Oi!t'vSBȞU)1%"+ihQT)`fSӈV:QAH5I1ʐ)E:yNH3uoWvLϚGYڐbɥ{<$ ek)g^~Cᆬ6PZ+T!KK*|#VaNTͿ+m*,!]b y HG:aov/7ZSQ@hl&ɲlK%d6MekM4Vԥ)JR)JS0qqȱbŊ1c1`c #1 jh`d0)pLԨ7nݻvׯ^!B!R4QΚhni%kZ$Hp[ &jE3&wZb97}zEEp.nu3/m3̅v*6>_|%bwVZY|x'B =22 : >u~o梗'wʋ8eU~+uky'4t|5GE/AG}CegNw1n/r,0>P2myœ(9 ᤻H%SDpJ|9Jm#ފzWBJiPɹ%6R0#aK L#0G##=rT8wz"GB‡E i>0#` 2#*[dfUbfơ ѓ&20FƔKbF*EZj6֨sp7gUm2a)C 8!3(Hof9ק(lTꃦ\ 1d 0F!NRQO)؎L0UV$U+NaU)d}9(a#Ĕ,#]m#d+ܢ$_J 2 FL(C$>D4Ch*IlPnFpc[HiLC(aA0 4FHl. 2(X֊Rw[iML[#F2aaXH#7#0) Nj)P8އG B8R(G] t#jޕ?g>WPFs%<|>yOG0#*8F).G#ŸGA:XB3P#ކaKp# FW~x:otC)dPTK1#K*m[SUmkt^ fff6z  Q)F1c(RTa(e"Pߩ%ȨHpp48\qոq- 7mZf,"0چkrʕ\vDg!1a_tE.489ʡ0pR$d(=Z'&[M9\8&ris\+f|!썭amb1ӎۆ\02]D#,jC?m چ+i &A3E7-FLc2fC2dr5i$u8k6CZak7iV+tAbTPlЮ,}f>莊i kT5Pֹqqf0FFnHHFMmӑ(2 q$=&DXITpZ`#$h,bU @j#QIJ jJ9/Cf}B=0ѵK32bf3 `EFDQEDQEDQEDQlQEDQEDQ3 a#(p\1n=EmG gkޤ }d=ګ Zwօ!˗&72Ʊ1c#L#(aJu)n)[mqk-h1A7Ȗ0̬$p]ImUS]_c}9{^eǺ. 
y{^vafl^j&Da(SC CZ 芛urS8m^Ǎg@ Ob*h0Œ0ٙk[fQSFbA5*fAa$)(79|<2Da1fe<9T7(tW, 4 %Q&LET2&䲕6 "Kx]<㡁Cچq`b sm(hC*%⡪ U@CїG^aQȝ ;lA9Q#J[ʢ鰑jF4S2:aGKffAdc3TILK1`ĩXXJcL2%I2 #a#HTF)f,dRRR ȥ ̤&eQXU(e"T$Je02 `(FDC`FC1$&H g$TT-l#BuD0%U)֤#'%">8|9ޮ/r/rȇ!JRT4 !TʕVPK 0F030#20F<*l B,U JMvJ+E`Th5 -Fe hȚ%S!h5KA)U2!%L)0*ePe2 YMA 2U)J"*E65H1(dFS`EJ2CL@P1 $&6QDb,b,RFQJ#S |\œ$ `xAZ˂xޟg>ᢤGpzFXtX]Q+4U.VÛ Vj QC >竆3#c )MhHx7o$JOT1˚sĢM}聲9\#jˉhІA(P$@%񗾊A,\!hi1,j Q-I jF^ ` sg³f,(DU^izNFZɤ(K$&d,lV  =mF}G82!1Q-4g\IAGNp~&p0bJ}j Hۻִ4`& ĤsJ* K%I6̹TGV:g&,m2bwyO┏j.F^% I Czݝ;;.N&2#W*is澭Z]R=k]u\lƛ.kW9KڑߺsvzU7Vc0JۤAF+F+Z]A|o$?:F|1#RChGdT76/f^^ox>*>Tue> } QIJ6|Έއ/!qtv>=2^Ó^[R߹MlDo$;TRDAĐ$>?}3ߡ4~=%S1I t:pS\I^ ]^6\̻$sRYreer03,J)+0z=gCٞNKױCb9| fDlT}Wh]_y#pe=X2\Cq >u@xwU]ս;W +7\Wn3A'I ָBk6Ͽ=-(y0-e wsYPmw4:xnCjsM_}LZorE9PuvR}S,%0a)Ԫ~+T~Ep©ɗj҇ĩHI2CY!~/0q'\T]~]ݮR2?i!7kq\jfW]֯Q QtBf9DVͳ@Հ՛lٕie y:GԷp RClqFI>"viТ=#jCK oC/(]~(N15QE hCwU +n^w~ `-@ jY֣B PdTh/G#{2;˻|v[S^/w/"y>&}n nq㻻g=b!9w}z#2Fe 2ff(Y#$aTj3L{[\-hNwGns#]ISRtkM 鴣ު{v`#f"0331FHT&fffcI{j!VD8J Ct]+EWV\t\d࢓iQpO>ggFow?sϖV22R9uAX̙d~t3AH?x#?~J|$a)?)p#:#0G2>`rn4Ĕw+ '3Y4F#)4T^&q^Fï1G0dfLak l-˹[Mw.՜3#ͦ~r ]RF$j* A&F_翢FtM]Xى{ݗ;+Ym_gW'S=kQq48$ϱcA2~((ֵH"""""&$I$I$I$I&:I2Ce6JJ,)5ף"u1i1r!)\yycǤ*U pR)JR)JZֵ k#8㮺ZֵOg4vyOP9~1@y]ѡͲa e 7RoȻJ_%XT=.P@MDeӢҖdnrz>ȣ8x؛qDPDxufܱ`?s$lA?CٳMW=d< a{`e5 jE G&@$( @4jDMiҨ 0f(.КPW6P ;֔(3l6ӓP(+ Fm=`un*( SGxSM=Cڦ44yM6h@yF44lJBhO#O" 6iM @ =Mh2@h 2hA@ @h@hh  zhM0 FjB O"z1O=2eOI4@h4h  RD 4 zbSɚ~4OHCF5@M@44ѠB\HɘQ+HdRFU R) ;"#+s:1zE^c$#6e;ԆIzw*n\&!QCTT]HsؤۊCT^E!8RWFWRhhKxÙf7WuDʋ L!z $<T= M!!y/,<TG.qCHiERBJ[R bHl*E5jm dŕfkZMcU0Uͭ\8SSRjQ64k-QlQ5JQ3!K2PfU]1jVɉ6 !\ l4 OorqoEOVlG |c15>t|T*8|}[UC*0F!IA/Rl<8!dIQDbbCG4E Y(,<0KT1 He 0Ći!tx42ߤ756Rqۨf* P-$ ed!Cd0>2(Y$[6JXm՛n VUr\ZqRV eUZk "Ua*IWdp6 RPU'T<Rs<AG%:r~=.D\hr.IT=QWp)8nvbCHw u۪]\R WP%JPP\ M!;(qJw";,CCĐR" 8Hx;:CJJN(mR PNIRQi 1!hrGb HvwR%Ԇgf1@edUY:D-&,` z['[%;Tw= ʐԆYCC%'ɡzI/CHpHu!Pͨz=ZD$=Sy(bIZCziۅN ‡](l2] $< a!TNąu09T-iBʳV$ƱliBT[ćRCt7=*ڇW(P=CԐNEH}>nw^f)Fy|v;҇!H{oCCC*v\rHl:R)6CCCd5ed%tGʣUBCI C'e $6\d^¤'02tRU00002V!IF*K |(h XXB%Czipc[PiLC0FUeV1 BƐ ¡C&Ę5(1wM0bc2/Zx3zɖhc $)0BT1 8'U ԠE&a!\ecKY2gkZYcdf1i7`Ǿkn43 fN:mqM7e3!0ՖؓUXS5VAяPU}&h:tUe ĦUd<9wVL0e165FKTҩ8ǓZ ceWoDdXxm!ɜk,Mml5Hޥ-U__jFnR[5FcmjI V1ޭ;;|ב҇GI5֚Zi icBCFHh1#Fm~|ƶcQՈ̪U^jRz,eToIX_:^U;o<[ݷwcOؕuJBRfWFKZoҐU`ʑni ~ݎM7pf$<t'7˧}o,RbY΃ <{e\q(QUe 1u% *I+ǩk ]A*=QWVaUYuHEc$(on֋ Ԟ,eEO*~@]Nd _5SgnԢrR͞DP|wiWbrRsusݮKKu%U.RGQe8iRTy~kyI}b|2w*`*k% |_~,tnVn7+7MxnH MI$RT{Dw6V@6J"dTIK[?}UK>h[Re_H\7 Ij}nAn{XFC[ˡ-K9'558'V ʔۺwwBΝZ\ ._?Bu|MapˁEg2WK2&[qMmH!.,c ڽKG }*iQ\)f:wSAR{,2R{2*R#~suO:C%L^e/دqpTnn#%Sdy?ec2ޑvz[etXbꆪ߆U2s ̱osxQJ;SUZfNKP{YV6ٳ+L,]{0F7-J5',vjk ]$ ] \)uȪ{xFvdyV0l ~K]"/*Fb})xNUX62viKu-$G9h-bWpʃ+* #_;vtmZ[kw/;.^};Apnw~8ݖ/pK߭ӿ'>]i@qޗWo^#*Q:XГe}_Iۥ)d^_g1\re d^X!gJ_Rq2֠L"VBؖ^&eIBSVe-jKkrc1KMxK.tI:ey[ Z^T/,({a5m9ؕ~Kp\E†p:%%1j&u7t뾑&3ynۢ]~KlE\7g,Ldݶĸ-2CIk-V[{tDYR`Pe ]]ԮBKrU[!dL }-ԲK26,Ap9> N|MIJi.?%GTMБ͔$[l/vNh=XH^#$83]F!!bIcXFU`ҡY#PecXi75c6ۺ͛>{Lk ף[tr߆\-&#\]E)u.|C Ut, Ë[WobKa eVp5XTnGQtx8i;K dl5ʏb)n{W^TlvߗO=_~ wsAnRUTpyR^ ys6rR^_}"^8J'\ }HETO^dz;<f݄%..slw~2}CV>1T~3`Kt)0_2{t}VhŜ*d9rrLX&14i66ɦfTkfHmffrP.7XաN lȮA\00aWeN-T0Pq(&ѲFlʒޠ2CVRU&"XD$3 :Y"mnEK IK)J3*Lb TV!e!Q1!$1!o̳*5 Umf` bjbbȣVUeJFLWARqCs{!Q?PRE e#JHpnH~Ɔ ćB2{R׽_uNsүǿ&5UPM /x{܋v ӽtDE!nK[KkS9#pc0`$LH :?yĄ.҉O]K :s01yT`9lP-Jo|o[W0{88m"]8WkF_vsU')R􈗴(Kr?)> PĆ$6Hq!bC(rHsJM*RqH{iIP졄1“D4(e !EX%'!JLC>3? : 6_wQ%'P> oA|KCw=?__}~'~@?Λzj"Kd҆i,+1`: =N|o>|N~غ}Hȓ@!@Q `&j\k5Y!$l\r׿gεjj-,"Wjzŷy>'#8 |#_7T/|:y9eڬ_ʗ=0w7Cׯλ~0s#L3F;W D](78%QvpUCPuAX̳FqG3=wQ-k*S?1n{\?h})>*R}B\(~f~oC9pHr$7HbCCI2ޠe1kPUwe.ᕺu,vNrЁ~ې? 
\>o1E-FsJ7)Ak -Z[ù۰(s @fZLPUTqR^~M]G䬾]՜9!S™])LH.Gݜ:}US|1䥤W.[CYrek 'YibGc/No+C$aYMz<~%$ hksC7΋^0ZGQ`G='m | ^fWY@yvPSbWGV&H\xm<˫}ԭ=mF}U4:2OѦoA:e;}/o-%;1i7k35gIQQ 99Rb+m5Ħd-/rWݮVhu xP2J2jo{UɄr͉9Ekj^p.'bb U@ֹKȍ~Cڷv-d3ټD]V$$wJP(ɮP+'oޫiFa`Sj2F/OvM[I&mzW<ޭ_JWlh3|4=/9 J7 lT栐Q͔j3KUpgc4؊ ΣTW̗RĿ{oݝ,H,GNfSO!uي.OCf6Vbۋ[R<޲XJ; 4p8C!b.:Té T5IKCR`KiPT]JiO *6{Òu:N|vgWQ[_u y軉ߜ_/4LiN<2Xgsf8O(t=cd,Bl:buY5qL.;a>|DN {S_rǺu1iB)%ѯo֥3zh| ۴hOTfϙaf% VR}0bīh癟] v ZOsteqe䅮v z=yoZv 'S~fK11qH}L t3*CpˏK}fkVMyAt<(o\!f-QVLQ[~ MUGUŕVAJL2Fm´`kn8y13kIdG0,,#Qm]z@٢FџxJVTI%jxߠeA6xo 5ƵV.{b3M\$oiG%tve|_ =`&e DSg}>{OV{tD+qݙZ-N,2\ωZRN=q f۽P}򏟵+J1^ [m)D[gAĬ_ff!do+붫$>_.E\ˢ_ ]5n7=068~f?$kg,s5mke뒮$_k3 [߽i(J/"EeYjUT~iR3LuV|170m=1Oŏ_k0r\Fִb^Mv3s|U#9uYbgCH8e9GWu?g"u?nA2 “u3GGJ, ]ki@*lGeI?V,12sJ{vg?Ѓ1XپAԞxޭsIy׶`x PGaՓRGNzJk[×V»8$WVmߚτO%,)7V?mXX INmU@p .V٦qRUZSNj<.؁1;#KO˛v!꾯iPp|)yڪW Bd#1??+-?3hdI뎞:]9a#G hi=J3.iN4?VG'"fgp`Q7n'nv^Ǡ\(5Tbh6U,CvSjdQѠ8{xF,^ɛ ŻQ]m tMxq' 9mMfdT&2z,F5| 3>LcYs\T7G@?񶝼c5̜㻴ϜDf*u< ҅fs.eGp&w3TBFʲV/-|v`yOg_VAX⮫-C"⊪^'<TjUe/7;``p^*iLdMU^ƂUӼX]1QdЃTuv"2|wj|8ibO px Aמc"m qGJFЀ]7 l7^ rY ftfmwi$?TC A?~~DZxᣆuÛ Si涁B^FD\g&KH0m3ޡ'vR˃3ql9DUTA7_5qoˑH{й"vGU׮ǧ5$[(2kd@`/ aSus6IZ:fUGܥ MIOۙr̋)֚Dtߑv\P7ؿ݌_WlVT:j4^@qdUڷjΉlFEdSA|6;g>ߔtӞc6-A!!+ԚW=Vj!i<)x4lkcuSeߛ>wD@j.u:쯛+_!:10Í>R [5LcLir5\"^i9cUAil}]5x;}X lP(ıD uQP UTBGJƇ!'p(h'p`U6A !u` S؉KРEV0zYwˏ6D'+'j X4B˵ؑpgH*'ibn[St:h4ueCk6og*z[ x"Sm1=g}Y w63P93E[z+P`,v|&ߧspذnM1sp@5Dq15创|^/ӲIoN ř堕16i' =XmPwM2wo\#Ej5fWw#&ٝކ-yC{']xѵԷ;aܚnl%>vw:c;ڋ&Oi3:[#R4G g]*Ys8j:iR-M09/annqXQ(|_l cpwLI>ߢN0t8DbhYRPbtb `F6"-Kv~{jo~뺞11r֢Pč1([ C-.9`M҆&YUqpMOY׹\p1Z*LJ|h]:qII/8 NƼd "~EH~V`^X ųhF]+eaGu|c0XO5mN SV6k,K3Oݵ6/Mf>TkC\4IToٷC=wm_U^eO͎q8)(.Px;ي#i t/8}SGKQ0gC#ƒ߼:z)Rmp7ōoB]y?r(GRZ[.ߛ[$C:*F]5SɸFg?֍MZ~pz̄&`BP_Oי94a>gGDۓ]്k)vVe+ҶbR%Vwmz%:2}Op?Z Gzaĝ3BM d'| ALb)""%;"`j6ƛ$hx뚐A us#7 Iw*Cx>;W@yrX1dڕ,/`ȿMJݨU9-3Q6˞މ[v;= |;~uA|q~Z^mؾ[Bw9k[7e}tE Ssǝ^}NHĒs.J#DKUKє"UD \ML+c=rTr!Zz*;a#uSQhm _=yT;eTwf 篜t ݥA'fv>rDn<ȃbBMf P 1٨~P6~B|4n+44˻CriFWa ;ΦoP|zVuZYP<ӡ31UW )شXf}^SjMQ*DM kR&I'#H(@\늢m f;>g+D+OD@*罌;YTgyU u3=,뷧3&`פ$ib0^%; 9OsTQED]/zO/ TJ[-K_ZsY ef!A6o,R}u[44Jh5oNJSsUx8ILܮcogl 4|V$ց,Fjns.N7ےZ2/MjScc. B~)zt9S*kl{Ud=y1ʯ|7ǑBM3:򒲿YC8  e <[}; \Wg(r$h~cJc)ɨ׺Ix>r ޴tdHM.)#W|doYo=DfciBQ|^˛hp6,Nm 7ע6:}. 
~W=dT|4Ix<2`ʏs:.uB/d <>paq\O֏v@΃N\(JVղ=3ʏxaꏲgJƷ `/#(3JK~^V&R !m=?ʙF].` d{&hw^ uu cL8>*351@\7paW S7F>ss=3npAdt\zs9O B\dA~A4Cc`ՖqgOR|Kd/'4,@eGICxaTy˓՚mm 3:H&Cݵ)(Q 7:JlU~ꕺy854:B*Hun%C)3"̔#+Hc1H_7kX0uhV@>Jj%I KQ~\'> 9m_:|%60`)<۽\Mq"]~?>SW5!<o r}RETfPȩ SjcѾ<(6Y|E foY{T budiܗ:}j9k.9\Td "M*=~cA`P ,>#OPzРί=)ju@Yc 'ǔYȧ$\Vx&S+XfX>;@j5Ocu~Pe̒x#/:ʌ??6]~iSE ͒?cX2&"GpBc-3f*GžW~I4PdĦwq-['QR/ OvԸZ[EbK9,T$7};4棕?;]V1"ў ٌw; DiY, }5#co RPgR3gUРTMSj,\T41ZrZޘJ/ {(3Jybx]hڼ;DsYv|5&~ .0M4e,?jƬQi%1/}܋UˮHn%%LCS=%Oxf7ʥU]b̫tn?7djF]Wjt@"ڸ*onZeRZ5Q<70Ys1))V1Q*q¬`u<}eNsCcJ 'z%$-ޑ'3}L$R4~a[*}"."e ?^Mr r 1eWxxCKbIUVqPҴ`hccco, zz=WH Rx))?Z6̰`ԥ  :g@IKJ '_uUL5> fT-SU3ZSz  ;"BRj[:U^]/1PWƘ|Ʀ' q|嚿k֩7O eFmj 1(^P&@jT7P1H]㳣.'Ͻ']rnAV-C0ML*mNbj3ېUgEƜ#urӯ<4gKj*ka+>> ]Xd<+Q̖] $+~r9.6449*j:1xYPB[pSO|E_05i0%K]賞w!}gQnK ty]g4ſi h=钆0iOl߉IbH1d=m*<&&|xϚ/#F_# znsv'Vzŷ b0[i_cj1kך1+ c>>i6Jz uȫ4{ %o܋4ToJSQ gA!ΫnW1#ʮ}yAKo|A0Ңtkbpif@N1eY2-[$-&#EդeݶN>{;Fi]N >7r0~5p^|X㜞~qnDh?5ndCz[TD&V@?x_HF" -,4đy^p׳Aj?I]5%CON7] \c{$/:- r7뉅XO?11W;itUwAnnjدJ_\چuj EM} kNh5NW3xVT3Cw`a+ 33]FA)tUu8E6 :j3+ ^q=̏ -kY3,Nԣ3-WН|q|%Ipy S~Qz3ߘױԃ2C%d 0h!c>;3=GjR+CPJk;>?#HP:93b+B3Cn^c T^+TwwXC[,H1mC$g٠p^fwX#STU"& l)%MfƂ.Ƚz撚ڟ )/i6 A>O5k|Y+ ^Z*] Tsô溺vΰyYGJp˪pr!439Ri'5{c)ό{de6ۯgݪYAt6Up[ZŨg}$nHKiHRVt<^޲E=zfbcUKKM7GG䥪;uzw̺][ z;{ KžA)!S1J[bȫ!JWSU[v_ S{kT˿NW7t%]KM>]"W $\By>nL!c >( 0/eYydٳb+I]nNeᤏJl&5]N Ձ7&(@a;Ҙ#IKi~wM[%Ԇ`޻ʬwNtXcv~+c% ^VE @c%SjuY&NH%gVNv0a۩MJ$Yw`FPbU@UGf}6QS4Y9*k;x: [{t#6mۗA,}ˋɥ#~$bAG{6"W M-LAftUzD1Kس|.hj[Y=ÁRs-x,VU&2Dh֢Eimؤ˶`1 uG+ 9Ϲ'/[hCSӚJypN[,ƽ^xXF®o쮘5b[9[ѮwK_xW'S_D9L>6F{W񫞹6=ڪuh(+ߛ,=CkC zׂյTA*ZrWeeaF6Uz~'Ӈy7(j:KP,JF5utCmv1.@ag"7B6-Ac@î0|XLGxjTy>/qbxlР[>tP+)F`̎A\YR*56 ʱ{nOpR+;SE)J y%9UusqV6OV)\]xE.SykAxcju;5][/ܽ!ߑ@%oǒctH`f;%n'ukEv9ZČxqOZ;Zz @5z|9o.-jƎ?,I:\iϦ\ >|c!GlT ݅?:II;[ igѼtWϫ ,\ Y 2)sIdx(f0"99iUYJgK |:]b}nVYrO\iTV)Wd lj\erhO2Y=^oNE߰+?yz ŭޯjL'kUE,~Xp7慉9Vڦ"_ٗ?.QOe=X.Oձ%0Y}ad`6,t-P.OĻ{3XP2*<u.kc9"cR{.OZk] %3uͷK3?ou``?ֳ'Q&OW>ʀ75w}e3⩵xHt}~Fh>~ww 4 tQ;~F~h~S( ģZ]a{ݽ{$׷C D䙬btOF<(YuCr,q Kzvn(4 'q<~]n_) {Iy 9-*|P8b#A8CD"B!Dc&pe7p<&LVNfs^tYqui}7EvA;.e}aK[c7(6ų{PZlA2'OuI(5xux2agtlʞ9HlNᎰJb/uvgv>|L莐U5Ǥ6ͽj#1A"tu Mkzv;vىNo͉eӥ)Z0jLΗśdȓrƛ|rS(mJ#e+|Mc[:#>a~D?tObF})J@s [P1DȞ'+ TvRBmUd.1l.!j;YG58P:.N%_XP֙GSءm2IiOW95ZOox=/݇vRpfpYh`I\Fe'4 KɎk4uE5l\G(OWb{5&eoT}y\ 1߯oM/u.ZխXO[ v#Q"+rݔ(2|MMEV_ԋ.r E1j Zdpsߢz8'|\7t^<){#M|JFp0;OBD4X f \^/Eol:H{BlƧ=ናʑ) Kj|zPNb*G=|8,_Xhq(+aA& "\0VP,Myfvt[j]>念]}.Dy=ȡ/MV&? g5~@Ŀ02BM}/ !]vx(M~s/~\=56Wy@(*@УKcsev1F~ T _cPs=xJ LS_QP* #yznA0_Lc{dm4LY .TNgg^ķe9$)לt96. &^lrkkU>g6|] 䢞"2`gCzX7/%5YundW~!Guص#ɒ}()H/9y2iLJ#F= Օmt۶>*DӗsRy<;[ඳNdNUrL3ezIc֮8դ7, {@! 
(Eȩ)m@h=`Pd.{KUg[Lx<3E]e 5MfFX %CyT+d9I*-Ir\1dRƛ7D;BJʄS~Eq&ޘܛV\2Pb1\bY(@m 2"!kߺ0 LGU ȸ+90W0)Dge gjaqX+A4_.ʬzg!aʳox՚aswwYRZOT^$4˦~&j #X~G߼rؾۯBVV CK-LEXGgQv ԭO-j&W_`&x8T>Lai .&y 3 3 AVdx{R-=kɸU,25W6BPuјjb҅M!`T=Y`\l.ժꤥ֋055<;N~𛻚gЯ6 r< tELXֆ94Tw7acZXoO~='cq^c b47RIΝH󸿉 D=¦Y?[eيͅ%i?WZ@SnW\ܴ!YV [S]X1T9L0l$*,-QJhZ?uXNlfnȼުf*ǚg_*cyUݐ`MKrhߏIk#լ9MF$ zHljq@eSS 9ufn%葐rV.3j//^Cs,苅n 7%ؽI8L}j"M(FI6۰HZoOk(0Zyu;Uw֙\j?,cTOZ* sS0cQUĻYY<@^~On5[7w X\PD4u^7m "_[E07}8(Τr:{vtԌ~eNwC7L*D(Q C H иcG?!#T r1 nQ^̡2(_V>пI`iV:σ!@ Lvg d黥ܑ{Z`\U*v~&3bU6]2YVw2 Wzjv1E6VT7F: #b2]nkc罕IhH'IZ+)NiXǬ^\ɹ?: nD|R&Wڮ9A H) MioKU3OjΧǨsN*mPvKoυARuKnEt  #r|^9D0U\ţr0ј3"r)Fo& N]g$߶ Ϯ"gL@S*Il\ꔺИkM<[e*r2i V*^{k9#F`|&Inܺ -Lz TkXЅcR{[Ht ߦX\i1hH-cS5Dϓ~ML3N5կU"Tݔip󠢌xd\oFq䡭o0qNU8%f<`%۶kdLD(v3RF# Ij`PyJuaZ;~/ c~ PM~`_ܗg;vtKvjb;iFk3п1͞'.;[uWc֪ܾ6ya]lMw#Nq L#046 ۘ|x`7Zd{>{3%TM ݃!|hcf ɪO2YmĽ'IRE۶D*Yzre@L :zbKT?C#DC|k\:~2Rᆵk ( 6n5Xxެ>ᩯl%<]N>oul^g֥Oo_ ] uqg>MLaNJ6s?svD^}UY'TeY·@jZQ!k~[M ""?h*KBt ۓY6I6cEdc'8\"PNtQbK~l(a(!3(|2=wZuBB6%zMA+%BUSD|B&HM1Ԑ$R!;';#C1[aW/u6f}}BexGPu#`N+hr_/rlT%qsng"⯫p^1ϲլqk)P?wJ-Y 7&¯mVWyzՙUKBzPgpl Ǯ -K=NvNʏg17bnBS}өv6x&P6`yv >-Y+=I8v3ݓQsdji5M0{&Q%L3ĭaF>#Й ?^֞JP2a=߮=^I%tL~ f3ZB5˙NEOO`N*};tw.+-Wc_m ܺh}ct|#r7&i' 6T) )bnM%3&zVԊ 4{sjzf8_Ys&'5SS˝{|8x<aɝY姕.d詪O*GdS98/u~u:+{C9Uc#!Pue7bu?(z3tNHOKk0>9jP79,-Rߛ=ܒnMQh`vz=)fvikj`PU'Уf&~#$&LW$r욍Xpk+l%T\3s"s<".se;fH/_}۵|MkX\\uS;R7V+.JKyN f7jie0/{~'P:xlV*6A٣Znj]+QcNKX3]U'PLJvpPU=Ρiv@ړC.@ň:U5S*O/Lƀ;%P8Ԙm%MPZͿRUPf.h{`ޜ w2em@I4("3COLp7?ui=OY 1%Nltxcvq^wΣ/%'Ca#5e/2#1H*, @gDTr&lfՂskV17Wŗ}SFCR]#.ӓB*r!Tꮃi1ͺ]I+3g`-wxߒa lJr)QL5g*j>%jD 31xp =HE`·P% \y8KP;e\o5K& 0⡘;kP,C闘$ҤKb],aEKzRuoG!7H?Ք?jEf+pBg+LZ^\Z}z^޷9h+$ԤDd]пVh{OfB 61pA$)eUs)i A Yp[u0h]P d'$]eۼ;e;OX~!2V ӹkfxI}&vQʞ*:sHJt&0ǻrˑS Ԫ΅Za9>3b_K9|]ûp`SͷMDQ,o'l:F7*Ga5 <|*b/oc){.5fym;og:Fy]B?VP?&M4"*\[6bL,յBwҪn/GNp>FV*?71+Oonp5<< C1ȶ.s@yIj03PwiHio t[p-s^e*}xcقuKRnVV!:'Yy6Ѹ9\5۟̌aSY`PB*^7> ~p2O2՚z|ӏ"#~T#!k瀬5ш3kP{F/GoZ} )'u$Fmy€ڮjK9 c_MU|)1x%v, hYRwѽ>5\&u%]_hO^˹V19 0r| }Njq Qj؝F6~#_|)/X_GP2vGԨgeRP- Pt|#BfzU(QGmnlo |GηX5U|Y+‚9 j:JT/KFcPz,Q8@ѭ,>҅x&Bsef9n]~8h4M"٠|\K %ͭcESaT6fe71}':7M0I3%Xz?F{ |#I" mlsYl=K_uT-U::4^ *13W?uU+r ֨84 8>]4i3'$h֊_o^åv{N?:Uj)Ekk_jmEѢ.xZNΘ%Z<:)P^XF ̭VrڧN ŭ6$= @U{*%mF2V #YP}4e[&*\G x+_-,K i<>ՖhE*v+=?Ce %:_:ܟT+>V&>ujpƨgÛDϚNUClrZN!dN{c{0$Oj,?n*Γ*2.|a(錵&ׄLq~ICit]?D1'75Mڎԉ!Ao/Gc9\ a` Ʃ1|ȽQi`ElTxm2Z=yԱIx;4-PM_b*}e,MxW^]vShI1BsF%& Ho)l&o'Fa1JyZɈׇ,Fh'6/*8`]uG )· C ;"_Vi*Yv\rZʅU}e_Nu2˲iCܥ@ϻ9LeˆF&%`y|瞞 BF2̵eH*X'QP-tk]\Wstac3w]#yxWS,zhy)lJg%[vŤ3źL*KH}7At?{VCA4Nտ=r2{IJ`ALgͬb¥Yvp 6wMZrR"M]Byc1B=!̇~_k2(4&8 7Qqc\B53l Ar h:<ASEC3ۮiO+Eħkkpcw?MnPKdA;m:.w-E@\K ҺyRǷ*Ŵ8f6?;5š%rs=jy-g">|6 -(VlEl=WoӇ._b^8}r+lxz-[E%ac窥D]Е]URYߣGA>2TW!f?rځ+bxGUZթXgOC"B8K*Tg_`O :H4Jlav 0lRA,f(%(FiJZS09° I|fM LVmJU G QleAn&q,ҕڡT~/3ez$yU1fS΂+xba- bքxJtb"л2Jv%\LMp,lB?L 0g>pջm8u! Y>jưbySYkKL C1v F ԹƁeK91 gnufO]Ĉ! S ];c|C@ e@AyDf\ޝS8u2d\{dIAmqg3t|fjp9sdՙhLJԓV}<-WSvz5X1g/ ty5<њQGV+`W%o=E -/%ctPWvr֘_ g GlTb%;;癚δV=3K1_jLM ֫LpP)Ϯ:2}4ݦL,jS`ED/gmBE+x_(~f !29X RjhQ)rsjs ѨooRuy*f]No =J@@\쮲ceV/oRi½ws][5KX_ϥJ$߂6؝٧\^׆|v%P" &jOS3ʚ/Njlw  ~$~k˂!!ZԕMYY.ylටiRL^"&GeL˩ `\%Jy?PS_FdbG.chdaד2ʩ"Jv4]um\?-u 9J HcBmҍ޲WWjԱ9IFT҂Ib0(:*_'d!Lm{ާcf. VB5$ˀޣxrYrL, bhr6(wMjjSŀq(0'- u zhA UWuX(SOUcC[,MS,[ާ]T hTe?2uy!/Z/U!(r$ (1 cz3u0ޡO~$ ^9va}]-UWsEaK)fC7?=\TYp?U(GAH!Xt(? 
xpzN,C,(麕["bWYmp9:f\`8ʬ{}kڹW-rR~Wa4 K4^nuMSVlc0rl1!1JaEJSlvb7ml=m I9ݺA+̱>1ѦN"m]MOuYt 5L g\n Ysu 7KqO9: p)H[;.d5o|<;O-z5ϊo #i7oLN ΔpJ ~R+կB@q55ֺ;JP2ޗx>D^T%bL@VmVRĕEǀ:QF6&:ŏ xn ,*yeA&<ש(^+5i|>R)y cAlt!/UR"lRt2挜45&545]i̷:6c?5܏,-KìAf-v &qH{;rۢHrTUCV[//ud` 7R :gJud_R =HS܋+ QK/1Oﲳoy/gbu0v y{/xw'Jɍufv5xT+o-b7?{UekMF+oXPMŧlF쾌5mLlzK_誃1kY-9SI lKQ%FA@څY8FR䪺r/6B[7'StAa㌗ӽsbEvmЧ5u92҃lθoA豩MpAQ~q=쾺3^'.u5ois7{n>k6o>&-b@[3sC_e=au;\*eMɷH߇CĵZ+n&:b@W׬?.8^X(_LU\>й9~~fx 9e~Xh'4Tď[sn.l T<>p ̨pAre(3^F+pn $I|R?1o_0*ixR EN mh݉UYsޯq>,eN>;)rGnÙ%˃ VH d07u6r ,!HiMVc$KW_ Xu1'.ʀ*&7zOS~HJD(LvQ dž늎M:Bt,n# ~ɽ0;{(#_|o'1ؗdmҟf_f팂c j4Xݼm,!93?$qtIewţka t8 {WD) )9S Z{wVRxtsNIjpݛ\*eikl?#UaA1=`Eߔɑ܎E__{_zf{zNl;MѫjTg 1ƞW;+(~s'I:%دOulǒpL[AubHTa+zB%Z؏ːg\+&~-Tay%=_.q״Tl?ժ8Yi% !{z;Ι+ED&f:B&DF ` }ca^qz!Cm`P<2|{ F-"̻Z'.8އ売 u6|U?Lb0[)RyX߭ ~jOHK^$OڭZXCPgϫM[X؏mO$^.(gr;<|i=]N[SCqJ|V)2B}+WL7 P8x8Qޑ >'GU6_.oj^ EC.I'lZG( G{%񗉣AEplJ{Q`/`)F;DڜO|}Rܴ =?S-]jd`YzZ>]Ems:~EV۩cCW==`񛴻oU?6X#/1Wo?ԷYW| Sg[T\BkĢ9ڋr*'qMj&1c8ʻIqMNYo+ҋkSIt+)>Ql+|M̂$q-(VIr0O)U/6LC>bbKYTlh F68urS 0^PfRv[&^m,& 퓤̯v0>+WO̷߭>V>_mS&`'˯]b#6Ø2ܲןp/Fbd%kmVB,T2"le$ Oz9OX܆pzq-eS?'Uj2X]]⫩u"G7"Vݝt0,glݫSQ&"NcDky/>.TQ3F\6F=NE[: oyh:y 1/p!'O(P<\hRV؂I7p=wة5NK袖:)1/ Gt2Wd8إ¿0=ZP;5 [8Cm1 3ks-VMHYehZl \s[qM ;6:IUDTdsoqok[>bY[ٰw$8F\GQdɅ C`w|ٱ(/@{SԒ󯥫&iB){ZXgLn'\- z}.&Й*C2/T40P3/hl[TSWǽ=8>G=ʮ[ d; $57DH+ -Lbhjr7YYuB?{V&YRʙ7#EghNbl7ϪٱTE.bXU|G.gRN Bi X?#= y_7$/ֿ<\nf_A7j{r߮cV8@"Ra2Cj8Jd-bQ쿼O>/~fdO PA;X-nP,J$2#. ʦ ν E?`{{ 4ȝ(GIvxtcen&,Z+Ϛ xZ3:Ʊ+s?kvEҷ2BR{CJgc+>v;8~hy 6Nwl 8Ȍ~2a"ss@ѵ}? Kk2aȸ2 oXn Nه% =T/0alOf.}x[iOmܘ))]Pk^1g2LX~کχdK["ۺ74|*$xַHΟ~8D qf)͚bACv:[ۢ4L\ %ʒ+,4tvv ۤH͂b[㴒5*I*qg1"Nsf&8lkg_Wv̡t47a3ED^ ʢHy-H 6ʧekƿ _ x)]e.Ovxʆ6n.DՅ ƅSwfdVp~Iw6 Lk)A'۔g\}?bXS򪂳ll [o=(?'g>kXP;ͦOa~p?G[ewu]PI2vC70a0JQX{G$'jwFZƂzn7s"gڠ;qu]r|87D_BS/Xwa Cra\fmFa ZG -4-VȄvyWmY$H( 6gW79>+-Փ4ϯtJu2K6|Ws{@8}oF^*K60{ex?f|@zu;ԖME]Ny>5 Z{kVN!h{fN'cUi~4[N~͏fU$%gHpbu)-_gS6kM٠թR:ww7*qо*ZkK;,f|/cWkB7#5vG̏/5Z<;HgdG3H S3He)hICL`lk^=j7I5EaPc'+$\%?Y3lD~T;q>4{ʀ_B\fj&yipyEiSn:ӼT5V; )AK/v4=iV ftAv۔ZWOyJ~Ec[ W&E$qxEǒq{uw;{|"?1g%됾h_y&e0Zj0 Ƶ9',ClR탤O<"`؅)E g&t}-V:.L}KRO$ڊ1)^kmXmHh] w7ߕ#Lz{;-H@N?p2bI*pvvW2L\xc$Sd= O+aIFMkH~ӭK4 TO#-?|BW˪;w~ =X^h oSU^98ٕa8~RuA=Q';W%5mW%+hu+w'cT4:"I6U4\dǍYx X D%3d_#}WqN:xҞe:pIZ1P=/'2Zvצ[GE4- O<{4ʣ@9a8-pK(z[;8 wݙzis0vG]. 
z,Q$b ~+8#4 ֫jh|^%u+; ;.kpiY5(t8E}|jӆqXc.nCOQ"I*A,&Aa 0,wf<%+HJƦ&7uDq~i}c҈Q>>Ral8nHHC6CS#{b2B+P˳P@~[^,4\C`'Sq1|U\ͅed}J9Aj=펒4rL#=ǐKz*zV۟iwK 8 MWha1 W$>=ŕ$Dk`'rWW:H(>-Pks\jQ 1H3& kPΑ e|sBH>L%/cXQ+U1Ek?-Yw\t:uе c^2P&VQZbGx: ?r,}s^U <ʻEn*!:T[1k00sZ=+ItPRxӫBI;͞2~zMi2SЅ!d"h *-߸jW!EZFeRjwxnx$.99{"='( |_qHk̟F#jO%GJ_N`]]6580ZVY-U7j{vM y:qh RWGD"iOotyt-"2#u`T|S#9*;·F^:;W `28 Q.yrJd 5>vHl7~I`nT_cS':vSԸgWM/wsgߠcjdn N˺3~?W%?sVYN@JptvG)B /,W`E}U\Я+%rGLf{K]Yo 3]JNAaTc#tB, wG󙅔bc `nY'k:iV s4O_Ź} $ rȒA(m /{ugf c~^)̘1y;$(6L:-aKXvz*wD;QKgo_z[>.kZ>w_YPDd-\ @(Bv?Rt DNPfYWZ5mWZ ɉWՎx6u3'G#!>۽SyZtoy={_`UR]ȯ|8~IAK,9hJtYz&NN6?4>w)|`_Lt'˗g!0  3{zl?w{?Tz}pu<hulʏ `b@]3~:c&~B:  C:(*i-g@ʌ0,T([r̨NȅDmt ĺD*,!ʒ (~.3bF)oniqC+A_\wo0I:癎ڿ|GEOꭟ|מy|G3333={t~z7-΍ֲ&v;"y{y`^KRwTS1 7-5o|=nN2B/R$O9`q6CXv{1e%_)?`f{yg~pf{ {e2\bĄ, |w/"{;a @R9P͍\ˀl *<QR┤DbaBAx ]#c,|'H-""}m0f߷_Uk[B)k}[?,=g"}nv嶤kKUV]}sd\VERaAhNS"v#ӗGߠ璘> 9SsR0/RphCég =/p E)`neT$n:-L7|W(wJleװ;y%(t$Im#N]ߗ 㹃oRu;28tx}[{{) 6a1Ymfj=^mqxyݮ\3tx]-e,?v4h+&L>%]5|OnglHffw{0Iu.0X2?e0",{>.z}{->g-orxTg㾬7ìv;]w_1NľNǎgOgqoⵎsῃӝL نZ +íw:g7snP΍sԸR/^V}l[m׹Mȼqy~zNR1kz<h< g{ 1x.lgeâuk5xn{?쌛dF=}fC1­xV.O^f?C aoݓ1|]Ըo|K^1ю[:M_w{|irـ@ċ쫁B8n "j*]} [:i]{Ng _.;:ksUuLJF,n*f c ``Qa/k>7ͧl~;1mf_26G]'Iܵ[emgɗٞ;Ut~l0,clj>-] fSQۋpro6&}=iusoK&ikL[6Nn kv؏@yKccbTN#dxY ac3fY U#w%sqoZskm7p-2>{\n~˖k/bݫn3_99gYijw5|۶x߁lp=Ϗ-Fé9럯x}]'>K'rMSY^exgn7g3McߑֽdU=_ڂ&200fO+,tn,I s6[Tw÷l6437mɵ43FƥC|db٦1%YSzͩ3ź-[rG;GWGC'=OsmkoƩt[GޑJqntyrvyv7rΏS1uiс.نO/ qjnPz%(g{SSW8InA´Z٭5-IsrMdLc1͐eV ṮYMe#3 !]VSfFd1bLl1ipqnQ\p7SpĬ`_&U- ,J㕈L1`Q7ŋ,X e%]eLJɔ̖V&# RC,"bYLK)b01,33#Xf,a&MEjѥfJ14p#o`-Ud)ČUEw#)k'?w{[gNEس>351%1Xe Yed1Y2̤QFV 3( bDd#*bUʧQf2Xff3 סddnclnY&2³)SqbL33fFeaeXa}*aTfX& ""Y3RMA咮FhofYԗ?2:FdasQѣ ڣSR2j4a5aGQ7aVaTa}FQɓRj4j[MMս;fOMO>諔<:L6ܙMue5 iShSaVơK*?bRh0 0t`9F-Bh------Ħƌ0J)d0 62*ތ#\4 C)VQ0Q!s$2dNFo60U%!jeZ8Ko o Q*j0U&L%%%Z ʪmH-)USS+¬9nlaaaalj51-ToXXZPsR)-̊bC 0 0 0KTPj%qUbS,4 0S#*20 1[F0Ԗa-u80cIi,222880cj6*⥪776Je %0[-e~KՋ,}ilyᥲq~8\p8-6,o "dZyljrjR"*022ęC# ʕdzp1aYC c3 f3*c3( ذVZV^f0Xڴ4 Y2VbYeVFLCLbb&fe4n+%1X#nѫ5LJ،,j 7.*Ve+fحI`|V EJWL(ʤCHW_}KaNiL#w|VFRL222QhcImFF$20OQ)h uֲ]m^N'y5IbUIh#Q+?~&pSRMK\@\mJ=2$±iZ*ĘFM,7'܋I+ߓ+Љ^2YJ;5b] \п$G$nV)Ma(1hrƤ6ꊴh 2K-Ķ64hѣscc<3u )h66:a8Iy>O(Q>%Gt8TY? ɩlpds=E+ctWKyGmSsL% Fh="ݪFNa>.s(^^|BgYֶ~CW:ɇa}a 6?aaaXda0QFj4|OEdjl&%e+ppnpaaLSF0u:Q:6>V:YݞeXq<&<\J*xT7q-767Oq75t>Gr'<'?NITs5ExK TSݏQcdG;g NrʞG74hpz pa\GG)t%C#G:wrlzҧ#cc 666:<2yNs&L? 
:TNX7&ɩ8 S{9Fj|3&OLɴOsՍ`:O1o+L۞Jv""U\.!FƍbQ6'(sJVTtRZ0єpqOE;L|CLbU7)ɴu;No%*ц0\TvQ[]0729Qdtfno86X+MSHL0ay+$ْ#llm4u7KyW jѣF.++r9IFlla49QX9Ilڍ1.Yw=Ol˜ dJa Jܫ,QtQlT݊[RFK_<藨iqO1j}&H r\F =U7ԗMBaJkSbXaFb + aXaaaaaaaaaaaaFQandbMdMdpaFxddh\LEoNݦT;᚝Iaڽr ea\:%Il(Tj2'3##ߪ7o2*#(F!2s).ɴԫ(624eL*#?2~<'1>IlT>IUDj+V!ebVP s(mFTadbԚeCjЭ++(bI*Y&V"5*K&Tc5aU4|%%4b\v1d'IycQ"dr8K+Ɇb06<EZKzMzNsSn,aRU `]~EFt7xWsH1a&$.eh@ylbdd&~Skm<7<}E%2ddô(L4u6;)iSYQ뤲"hwEN?!Idy K#ȧIaIhyt0sR߭WgFGiM\­U<֧E,62a#&2a1ީ^tRua9I{ciIG0C&否%p#v 6\JN?+xpSSxUEEJsE9KjVt9nN6>v0s\X}޴e0^Hz77O)ZoCIdvJB_7xFJI+Čd>xMI5Ha Bi^ Q|u-±Gi^}ZI2LXjPFTU2 2y/2ˤ-Ņ&%TfFfG0KbZ%捤SU=%FSzVSjdlUha#x VIs*ű,/˶¿"HGޢ龽ڗ?J}_9YwOusv\ 8|ܻW2y?Kt7Kssu?*:/<}ז~뚢䌉8+ϟ>7gy]W:b4ByOG6)|[:BUJvT*/<ޭ̭ğsm,%̙?OJz׿}%W7SW1Il,Jx>u{%RKSFnl ̣ g3K (sF7t/_ErKчC(-͍nb8s⏤Ksxލ6xIJ$6hIJ$[%b%# _ Sy<%->'}fG.#lsT#׉^(?>j2E|)nnX\䋡Ҍ6TѱabX2bJ'(Fƌ85%74 KcFy>Od({/f|'\j|ŀZ.wQy(c#]6}E/2\<]7:Ý+CLZ_Y-ڣa'7,i4"~)S=S_쯫+cܽ4ԯuj{ܚw_S*&TLnLfi9M|u{/߼  U}a#(hG[|wi|St>-aex288KFdaqpUj606FllpeѹuS+.F277Kcst_ttUFhцFx(S_~ uI~Q)T2 9aaah*hF*djK(ʌFQ FFFQ4/- ## 21,20,T221,+# K$IJ K *DRFJaaV%a%aT`5ddF!1X+e II" KvW>FFaaadn tps2?*W| *< %h#eFR_E}5Gt\D"`:2BRmTj0Qy;*`hS;;ĬTĬEaL2Kxh##y5%WiheE]%jcQÑllny# 㼋w u Z[Fw+J狢s9:pyrHK*[@KGo2 vCnr669C< "'2wFѐm2dɓxWXeebL+$d09V)dsKMU֏i.IhF#ÔasssѣFƍG#xGrK0ܫ 775FFFƍhlr676ͨlr0h788869"1-G+K=4bapgMa8 St)EF/{١]cy+~QO,JɽRz̏)W20 VFA1'o¼rj4j4leFj5hccQaF0 dmaa Dz+aQ}qwgӝJޞ^VQXaQFQQ(%>#INjS}Jܸb1eXarRb\IC\xмDs'z@x/ib1abG*^)aLvN± rRw⧶%ė|yiM9a0j4vW'vpl.9Ghj00pt::F74lnt:#ѼtQ,9 :t99 qܫ  Qcsss:Co#csqas67 v%X%Ia21R#VFaVb+ İ20%0 *220 REIaaP4FQeFQevѤ4]UWD/E-x"))AN$E$ݩEeƯ񗂶Z*1}5r_:A5)纡x+b؏_R8}W)=EArBYtҨaS^=nl.*Ҵ(dI/ jU+3RwNqΥm%EOVBX}W1,0 # 0 0ľTmG+lL2TXWJ+1y O\R*bGO(c *;l%^z\*/]U[{ЛPM*WܩSNBow.%}Js( &>zk =~j0ĶKTj*9%*0r4alUB-K)-QjG0 0¬20ɐaU%$&ѣÒ9R`/honW_킿z e{7DbTߨOƯY&DLl#i,llbX Tbؖ(q ժwDQ}FJ쏐$>I;aFsg^"t<|sr$?/lXZWv,XtRo/%)CbV+ʲ#S&&ʩpѢ"DHpQ>oQab9c!mY=Sʭ;eV iWu]5;277SɔU*2a2420# 20# iS*,#Ťi`hNaڹNnod}u~PŸb\F24TL\4SIm2@jr@OD8:7}$K.q%s+d8#Ԏ.ep.D{+#=ڨ;OZY1B^3xeEn\QעΔ/d{hU4";M R7az7+ż*Q9Iwi||>cWAI[:r;l yJrюq+j*rjrdԩ|fIj5;ުIsKVJSi.n'dro2q:*UL2b+6=0K Q5N&Mt쩓cc 9ѕ6FJцM&(2V EfIejU5Edô-SFMs𒋉r9%!ΫIF諲4TdR)]KQ-V݄jP[ʕ9"*xb1Ҋ5*r/7".T8{y/ p|Y*xޓAcs1~m|Lř˂l\9|U_m"== WEU_za/'.G^ybz\q yc67)_rYWffY^l>JN|&ɒ'"j2bS'uV/;V:)V'8·tW]a+XWpCzH%st)ʢ.(/U;[m+減mF҆n2m"7IRs IZiKj&"`\UArU_M!o 22I%xy}4y,0)_-|ᔃ 3yW3# 2e0ʏ_/РJ:+ʈKzYS"yh6UEڬ2R-MwOaX$,'y󩢞⼀SFL2(6(l"b'\G9蝳΅𞹒%bi/I\Hݧ1 x 1a%dT=}UE1s"VSQd%tPĪYֺV J=KdF K#jTE"D[yjSWloF\EXeh܋T+llE~c +P0{6++,Y[,UhÈWoQl:F ^. \ 9iG;GXbYCY [I NMboPYC. 
brms/R/emmeans.R0000644000175000017500000001733314111751666013337 0ustar nileshnilesh#' Support Functions for \pkg{emmeans}
#'
#' Functions required for compatibility of \pkg{brms} with \pkg{emmeans}.
#' Users are not required to call these functions themselves. Instead,
#' they will be called automatically by the \code{emmeans} function
#' of the \pkg{emmeans} package.
#'
#' @name emmeans-brms-helpers
#'
#' @inheritParams posterior_epred.brmsfit
#' @param re_formula Optional formula containing group-level effects to be
#'   considered in the prediction. If \code{NULL}, include all group-level
#'   effects; if \code{NA} (default), include no group-level effects.
#' @param epred Logical. If \code{TRUE} compute predictions of
#'   the posterior predictive distribution's mean
#'   (see \code{\link{posterior_epred.brmsfit}}) while ignoring
#'   arguments \code{dpar} and \code{nlpar}. Defaults to \code{FALSE}.
#' @param data,trms,xlev,grid,vcov. Arguments required by \pkg{emmeans}.
#' @param ... Additional arguments passed to \pkg{emmeans}.
#'
#' @details
#' In order to ensure compatibility of most \pkg{brms} models with
#' \pkg{emmeans}, predictions are not generated 'manually' via a design matrix
#' and coefficient vector, but rather via \code{\link{posterior_linpred.brmsfit}}.
#' This appears to generally work well, but note that it produces an `.@linfct`
#' slot that contains the computed predictions as columns instead of the
#' coefficients.
#'
#' @examples
#' \dontrun{
#' fit <- brm(time | cens(censored) ~ age * sex + disease + (1|patient),
#'            data = kidney, family = lognormal())
#' summary(fit)
#'
#' # summarize via 'emmeans'
#' library(emmeans)
#' rg <- ref_grid(fit)
#' em <- emmeans(rg, "disease")
#' summary(em, point.est = mean)
#'
#' # obtain estimates for the posterior predictive distribution's mean
#' epred <- emmeans(fit, "disease", epred = TRUE)
#' summary(epred, point.est = mean)
#' }
NULL

# recover the variables used in the model predictions
# @param data only added to prevent it from being passed further via ...
#' @rdname emmeans-brms-helpers
recover_data.brmsfit <- function(object, data, resp = NULL, dpar = NULL,
                                 nlpar = NULL, re_formula = NA,
                                 epred = FALSE, ...)
{ bterms <- .extract_par_terms( object, resp = resp, dpar = dpar, nlpar = nlpar, re_formula = re_formula, epred = epred ) trms <- attr(model.frame(bterms$allvars, data = object$data), "terms") # brms has no call component so the call is just a dummy emmeans::recover_data(call("brms"), trms, "na.omit", data = object$data, ...) } # Calculate the basis for making predictions. In some sense, this is # similar to the fitted() function with new data on the link scale. # Transforming to response scale, if desired, is handled by emmeans. #' @rdname emmeans-brms-helpers emm_basis.brmsfit <- function(object, trms, xlev, grid, vcov., resp = NULL, dpar = NULL, nlpar = NULL, re_formula = NA, epred = FALSE, ...) { if (is_equal(dpar, "mean")) { # deprecated as of version 2.15.9 warning2("dpar = 'mean' is deprecated. Please use epred = TRUE instead.") epred <- TRUE dpar <- NULL } epred <- as_one_logical(epred) bterms <- .extract_par_terms( object, resp = resp, dpar = dpar, nlpar = nlpar, re_formula = re_formula, epred = epred ) if (epred) { post.beta <- posterior_epred( object, newdata = grid, re_formula = re_formula, resp = resp, incl_autocor = FALSE, ... ) } else { req_vars <- all_vars(bterms$allvars) post.beta <- posterior_linpred( object, newdata = grid, re_formula = re_formula, resp = resp, dpar = dpar, nlpar = nlpar, incl_autocor = FALSE, req_vars = req_vars, ... ) } if (anyNA(post.beta)) { stop2("emm_basis.brmsfit created NAs. Please check your reference grid.") } misc <- bterms$.misc if (length(dim(post.beta)) == 3L) { # reshape to a 2D matrix, for example, in multivariate models ynames <- dimnames(post.beta)[[3]] if (is.null(ynames)) { ynames <- as.character(seq_len(dim(post.beta)[3])) } dims <- dim(post.beta) post.beta <- matrix(post.beta, ncol = prod(dims[2:3])) misc$ylevs = list(rep.meas = ynames) } attr(post.beta, "n.chains") <- object$fit@sim$chains X <- diag(ncol(post.beta)) bhat <- apply(post.beta, 2, mean) V <- cov(post.beta) nbasis <- matrix(NA) dfargs <- list() dffun <- function(k, dfargs) Inf environment(dffun) <- baseenv() nlist(X, bhat, nbasis, V, dffun, dfargs, misc, post.beta) } # extract terms of specific predicted parameter(s) in the model # currently, the only slots that matter in the returned object are # allvars: formula with all required variables on the right-hand side # .misc: a named list with additional info to be interpreted by emmeans .extract_par_terms <- function(x, ...) { UseMethod(".extract_par_terms") } #' @export .extract_par_terms.brmsfit <- function(x, resp = NULL, re_formula = NA, dpar = NULL, epred = FALSE, ...) { if (is_equal(dpar, "mean")) { # deprecation warning already provided in emm_basis.brmsfit epred <- TRUE dpar <- NULL } resp <- validate_resp(resp, x) new_formula <- update_re_terms(formula(x), re_formula) bterms <- brmsterms(new_formula, resp_rhs_all = FALSE) if (is_ordinal(bterms)) { warning2("brms' emmeans support for ordinal models is experimental ", "and currently ignores the threshold parameters.") } .extract_par_terms(bterms, resp = resp, dpar = dpar, epred = epred, ...) } #' @export .extract_par_terms.mvbrmsterms <- function(x, resp, epred, ...) { stopifnot(is.character(resp)) epred <- as_one_logical(epred) out <- x # only use selected univariate models out$terms <- out$terms[resp] if (epred) { out$allvars <- allvars_formula(lapply(out$terms, get_allvars)) out$.misc <- list() return(out) } for (i in seq_along(out$terms)) { out$terms[[i]] <- .extract_par_terms(out$terms[[i]], epred = epred, ...) 
} out$allvars <- allvars_formula(lapply(out$terms, get_allvars)) misc_list <- unique(lapply(out$terms, "[[", ".misc")) if (length(misc_list) > 1L){ stop2("brms' emmeans support for multivariate models is limited ", "to cases where all univariate models have the same family.") } out$.misc <- misc_list[[1]] out } #' @export .extract_par_terms.brmsterms <- function(x, dpar, nlpar, epred, ...) { epred <- as_one_logical(epred) all_dpars <- names(x$dpars) all_nlpars <- names(x$nlpars) out <- x if (epred) { out$.misc <- list() return(out) } if (!is.null(nlpar)) { if (!is.null(dpar)) { stop2("'dpar' and 'nlpar' cannot be specified at the same time.") } nlpar <- as_one_character(nlpar) if (!nlpar %in% all_nlpars) { stop2( "Non-linear parameter '", nlpar, "' is not part of the model.", "\nSupported parameters are: ", collapse_comma(all_nlpars) ) } out <- x$nlpars[[nlpar]] } else if (!is.null(dpar)) { dpar <- as_one_character(dpar) if (!dpar %in% all_dpars) { stop2( "Distributional parameter '", dpar, "' is not part of the model.", "\nSupported parameters are: ", collapse_comma(all_dpars) ) } out <- x$dpars[[dpar]] } else { # extract 'mu' parameter by default if (!"mu" %in% names(x$dpars)) { # concerns categorical-like and mixture models stop2("emmeans is not yet supported for this brms model.") } out <- x$dpars[["mu"]] } out$.misc <- emmeans::.std.link.labels(out$family, list()) out } brms/R/backends.R0000644000175000017500000004635514136566222013471 0ustar nileshnilesh# parse Stan model code # @param model Stan model code # @return validated Stan model code parse_model <- function(model, backend, ...) { backend <- as_one_character(backend) .parse_model <- get(paste0(".parse_model_", backend), mode = "function") .parse_model(model, ...) } # parse Stan model code with rstan # @param model Stan model code # @return validated Stan model code .parse_model_rstan <- function(model, silent = 1, ...) { out <- eval_silent( rstan::stanc(model_code = model, ...), type = "message", try = TRUE, silent = silent ) out$model_code } # parse Stan model code with cmdstanr # @param model Stan model code # @return validated Stan model code .parse_model_cmdstanr <- function(model, silent = 1, ...) { require_package("cmdstanr") temp_file <- cmdstanr::write_stan_file(model) out <- eval_silent( cmdstanr::cmdstan_model(temp_file, compile = FALSE, ...), type = "message", try = TRUE, silent = silent ) out$check_syntax(quiet = TRUE) collapse(out$code(), "\n") } # parse model with a mock backend for testing .parse_model_mock <- function(model, silent = TRUE, parse_error = NULL, parse_check = "rstan", ...) { if (!is.null(parse_error)) { stop2(parse_error) } else if (parse_check == "rstan") { out <- .parse_model_rstan(model, silent = silent, ...) } else if (parse_check == "cmdstanr") { out <- .parse_model_cmdstanr(model, silent = silent, ...) } else if (is.null(parse_check)) { out <- "mock_code" } else { stop2("Unknown 'parse_check' value.") } out } # compile Stan model # @param model Stan model code # @return validated Stan model code compile_model <- function(model, backend, ...) { backend <- as_one_character(backend) .compile_model <- get(paste0(".compile_model_", backend), mode = "function") .compile_model(model, ...) } # compile Stan model with rstan # @param model Stan model code # @return model compiled with rstan .compile_model_rstan <- function(model, threads, opencl, silent = 1, ...) { args <- list(...) 
args$model_code <- model if (silent < 2) { message("Compiling Stan program...") } if (use_threading(threads)) { if (utils::packageVersion("rstan") >= 2.26) { threads_per_chain_def <- rstan::rstan_options("threads_per_chain") on.exit(rstan::rstan_options(threads_per_chain = threads_per_chain_def)) rstan::rstan_options(threads_per_chain = threads$threads) } else { stop2("Threading is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } } if (use_opencl(opencl)) { stop2("OpenCL is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } eval_silent( do_call(rstan::stan_model, args), type = "message", try = TRUE, silent = silent >= 2 ) } # compile Stan model with cmdstanr # @param model Stan model code # @return model compiled with cmdstanr .compile_model_cmdstanr <- function(model, threads, opencl, silent = 1, ...) { require_package("cmdstanr") args <- list(...) args$stan_file <- cmdstanr::write_stan_file(model) if (use_threading(threads)) { args$cpp_options$stan_threads <- TRUE } if (use_opencl(opencl)) { args$cpp_options$stan_opencl <- TRUE } eval_silent( do_call(cmdstanr::cmdstan_model, args), type = "message", try = TRUE, silent = silent >= 2 ) } # compile model with a mock backend for testing .compile_model_mock <- function(model, threads, opencl, compile_check = "rstan", compile_error = NULL, silent = 1, ...) { if (!is.null(compile_error)) { stop2(compile_error) } else if (compile_check == "rstan") { out <- .parse_model_rstan(model, silent = silent, ...) } else if (compile_check == "cmdstanr") { out <- .parse_model_cmdstanr(model, silent = silent, ...) } else if (is.null(compile_check)) { out <- list() } else { stop2("Unknown 'compile_check' value.") } out } # fit Stan model # @param model Stan model code # @return validated Stan model code fit_model <- function(model, backend, ...) { backend <- as_one_character(backend) .fit_model <- get(paste0(".fit_model_", backend), mode = "function") .fit_model(model, ...) } # fit Stan model with rstan # @param model a compiled Stan model # @param sdata named list to be passed to Stan as data # @return a fitted Stan model .fit_model_rstan <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, inits, exclude, seed, control, silent, future, ...) { # some input checks and housekeeping if (use_threading(threads)) { if (utils::packageVersion("rstan") >= 2.26) { threads_per_chain_def <- rstan::rstan_options("threads_per_chain") on.exit(rstan::rstan_options(threads_per_chain = threads_per_chain_def)) rstan::rstan_options(threads_per_chain = threads$threads) } else { stop2("Threading is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } } if (use_opencl(opencl)) { stop2("OpenCL is not supported by backend 'rstan' version ", utils::packageVersion("rstan"), ".") } if (is.character(inits) && !inits %in% c("random", "0")) { inits <- get(inits, mode = "function", envir = parent.frame()) } args <- nlist( object = model, data = sdata, iter, seed, init = inits, pars = exclude, include = FALSE ) dots <- list(...) 
args[names(dots)] <- dots # do the actual sampling if (silent < 2) { message("Start sampling") } if (algorithm %in% c("sampling", "fixed_param")) { c(args) <- nlist(warmup, thin, control, show_messages = !silent) if (algorithm == "fixed_param") { args$algorithm <- "Fixed_param" } if (future) { if (cores > 1L) { warning2("Argument 'cores' is ignored when using 'future'.") } args$chains <- 1L futures <- fits <- vector("list", chains) for (i in seq_len(chains)) { args$chain_id <- i if (is.list(inits)) { args$init <- inits[i] } futures[[i]] <- future::future( brms::do_call(rstan::sampling, args), packages = "rstan", seed = TRUE ) } for (i in seq_len(chains)) { fits[[i]] <- future::value(futures[[i]]) } out <- rstan::sflist2stanfit(fits) rm(futures, fits) } else { c(args) <- nlist(chains, cores) out <- do_call(rstan::sampling, args) } } else if (algorithm %in% c("fullrank", "meanfield")) { # vb does not support parallel execution c(args) <- nlist(algorithm) out <- do_call(rstan::vb, args) } else { stop2("Algorithm '", algorithm, "' is not supported.") } out <- repair_stanfit_names(out) out } # fit Stan model with cmdstanr # @param model a compiled Stan model # @param sdata named list to be passed to Stan as data # @return a fitted Stan model .fit_model_cmdstanr <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, inits, exclude, seed, control, silent, future, ...) { require_package("cmdstanr") # some input checks and housekeeping class(sdata) <- "list" if (isNA(seed)) { seed <- NULL } if (is_equal(inits, "random")) { inits <- NULL } else if (is_equal(inits, "0")) { inits <- 0 } if (future) { stop2("Argument 'future' is not supported by backend 'cmdstanr'.") } args <- nlist(data = sdata, seed, init = inits) if (use_threading(threads)) { args$threads_per_chain <- threads$threads } if (use_opencl(opencl)) { args$opencl_ids <- opencl$ids } # TODO: exclude variables via 'exclude' dots <- list(...) args[names(dots)] <- dots args[names(control)] <- control chains <- as_one_numeric(chains) empty_model <- chains <= 0 if (empty_model) { # fit the model with minimal amount of draws # TODO: replace with a better solution chains <- 1 iter <- 2 warmup <- 1 thin <- 1 cores <- 1 } # do the actual sampling if (silent < 2) { message("Start sampling") } if (algorithm %in% c("sampling", "fixed_param")) { c(args) <- nlist( iter_sampling = iter - warmup, iter_warmup = warmup, chains, thin, parallel_chains = cores, show_messages = !silent, fixed_param = algorithm == "fixed_param" ) out <- do_call(model$sample, args) } else if (algorithm %in% c("fullrank", "meanfield")) { # vb does not support parallel execution c(args) <- nlist(iter, algorithm) out <- do_call(model$variational, args) } else { stop2("Algorithm '", algorithm, "' is not supported.") } # a lot of metadata is not stored via rstan::read_stan_csv metadata <- cmdstanr::read_cmdstan_csv( out$output_files(), variables = "", sampler_diagnostics = "" ) # transform into stanfit object for consistent output structure out <- rstan::read_stan_csv(out$output_files()) out <- repair_stanfit_names(out) # allow updating the model without recompilation attributes(out)$CmdStanModel <- model attributes(out)$metadata <- metadata if (empty_model) { # allow correct updating of an 'empty' model out@sim <- list() } out } # fit model with a mock backend for testing .fit_model_mock <- function(model, sdata, algorithm, iter, warmup, thin, chains, cores, threads, opencl, inits, exclude, seed, control, silent, future, mock_fit, ...) 
{
  if (is.function(mock_fit)) {
    out <- mock_fit()
  } else {
    out <- mock_fit
  }
  out
}

# extract the compiled stan model
# @param x brmsfit object
compiled_model <- function(x) {
  stopifnot(is.brmsfit(x))
  backend <- x$backend %||% "rstan"
  if (backend == "rstan") {
    out <- rstan::get_stanmodel(x$fit)
  } else if (backend == "cmdstanr") {
    out <- attributes(x$fit)$CmdStanModel
  } else if (backend == "mock") {
    stop2("'compiled_model' is not supported in the mock backend.")
  }
  out
}

# Does the model need recompilation before being able to sample again?
needs_recompilation <- function(x) {
  stopifnot(is.brmsfit(x))
  backend <- x$backend %||% "rstan"
  if (backend == "rstan") {
    # TODO: figure out when rstan requires recompilation
    out <- FALSE
  } else if (backend == "cmdstanr") {
    exe_file <- attributes(x$fit)$CmdStanModel$exe_file()
    out <- !is.character(exe_file) || !file.exists(exe_file)
  } else if (backend == "mock") {
    out <- FALSE
  }
  out
}

#' Recompile Stan models in \code{brmsfit} objects
#'
#' Recompile the Stan model inside a \code{brmsfit} object, if necessary.
#' This does not change the model, it simply recreates the executable
#' so that sampling is possible again.
#'
#' @param x An object of class \code{brmsfit}.
#' @param recompile Logical, indicating whether the Stan model should be
#'   recompiled. If \code{NULL} (the default), \code{recompile_model} tries
#'   to figure out internally if recompilation is necessary. Setting it to
#'   \code{FALSE} will cause \code{recompile_model} to always return the
#'   \code{brmsfit} object unchanged.
#'
#' @return A (possibly updated) \code{brmsfit} object.
#'
#' @export
recompile_model <- function(x, recompile = NULL) {
  stopifnot(is.brmsfit(x))
  if (is.null(recompile)) {
    recompile <- needs_recompilation(x)
  }
  recompile <- as_one_logical(recompile)
  if (!recompile) {
    return(x)
  }
  message("Recompiling the Stan model")
  backend <- x$backend %||% "rstan"
  new_model <- compile_model(
    stancode(x), backend = backend, threads = x$threads,
    opencl = x$opencl, silent = 2
  )
  if (backend == "rstan") {
    x$fit@stanmodel <- new_model
  } else if (backend == "cmdstanr") {
    attributes(x)$CmdStanModel <- new_model
  } else if (backend == "mock") {
    stop2("'recompile_model' is not supported in the mock backend.")
  }
  x
}

# extract the elapsed time during model fitting
# @param x brmsfit object
elapsed_time <- function(x) {
  stopifnot(is.brmsfit(x))
  backend <- x$backend %||% "rstan"
  if (backend == "rstan") {
    out <- rstan::get_elapsed_time(x$fit)
    out <- data.frame(
      chain_id = seq_len(nrow(out)),
      warmup = out[, "warmup"],
      sampling = out[, "sample"]
    )
    out$total <- out$warmup + out$sampling
    rownames(out) <- NULL
  } else if (backend == "cmdstanr") {
    out <- attributes(x$fit)$metadata$time$chains
  } else if (backend == "mock") {
    stop2("'elapsed_time' not supported in the mock backend.")
  }
  out
}

# supported Stan backends
backend_choices <- function() {
  c("rstan", "cmdstanr", "mock")
}

# supported Stan algorithms
algorithm_choices <- function() {
  c("sampling", "meanfield", "fullrank", "fixed_param")
}

# check if the model was fit with the required backend
require_backend <- function(backend, x) {
  stopifnot(is.brmsfit(x))
  backend <- match.arg(backend, backend_choices())
  if (isTRUE(x$backend != backend)) {
    stop2("Backend '", backend, "' is required for this method.")
  }
  invisible(TRUE)
}
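# --- Illustrative sketch (not part of brms) --------------------------------
# A hypothetical helper showing how the backend utilities above could be
# combined to summarize the fitting backend of a model; 'x' is assumed to be
# a brmsfit object and '%||%' is the null-coalescing helper used throughout
# this package.
.sketch_backend_summary <- function(x) {
  stopifnot(is.brmsfit(x))
  list(
    backend = x$backend %||% "rstan",
    needs_recompilation = needs_recompilation(x),
    elapsed_time = elapsed_time(x)
  )
}
# ----------------------------------------------------------------------------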
#' Threading in Stan
#'
#' Use threads for within-chain parallelization in \pkg{Stan} via the \pkg{brms}
#' interface. Within-chain parallelization is experimental! We recommend its use
#' only if you are experienced with Stan's \code{reduce_sum} function and have a
#' slow running model that cannot be sped up by any other means.
#'
#' @param threads Number of threads to use in within-chain parallelization.
#' @param grainsize Number of observations evaluated together in one chunk on
#'   one of the CPUs used for threading. If \code{NULL} (the default),
#'   \code{grainsize} is currently chosen as \code{max(100, N / (2 * threads))},
#'   where \code{N} is the number of observations in the data. This
#'   default is experimental and may change in the future without prior notice.
#' @param static Logical. Apply the static (non-adaptive) version of
#'   \code{reduce_sum}? Defaults to \code{FALSE}. Setting it to \code{TRUE}
#'   is required to achieve exact reproducibility of the model results
#'   (if the random seed is set as well).
#'
#' @return A \code{brmsthreads} object which can be passed to the
#'   \code{threads} argument of \code{brm} and related functions.
#'
#' @details The adaptive scheduling procedure used by \code{reduce_sum} will
#'   prevent the results from being exactly reproducible even if you set the
#'   random seed. If you need exact reproducibility, you have to set argument
#'   \code{static = TRUE}, which may reduce efficiency a bit.
#'
#'   To ensure that chunks (whose size is defined by \code{grainsize}) require
#'   roughly the same amount of computing time, we recommend storing
#'   observations in random order in the data. At the very least, please avoid
#'   sorting observations by the response values. This is because the latter
#'   often causes variations in the computing time of the pointwise
#'   log-likelihood, which makes up a big part of the parallelized code.
#'
#' @examples
#' \dontrun{
#' # this model just serves as an illustration
#' # threading may not actually speed things up here
#' fit <- brm(count ~ zAge + zBase * Trt + (1|patient),
#'            data = epilepsy, family = negbinomial(),
#'            chains = 1, threads = threading(2, grainsize = 100),
#'            backend = "cmdstanr")
#' summary(fit)
#' }
#'
#' @export
threading <- function(threads = NULL, grainsize = NULL, static = FALSE) {
  out <- list(threads = NULL, grainsize = NULL)
  class(out) <- "brmsthreads"
  if (!is.null(threads)) {
    threads <- as_one_numeric(threads)
    if (!is_wholenumber(threads) || threads < 1) {
      stop2("Number of threads needs to be positive.")
    }
    out$threads <- threads
  }
  if (!is.null(grainsize)) {
    grainsize <- as_one_numeric(grainsize)
    if (!is_wholenumber(grainsize) || grainsize < 1) {
      stop2("The grainsize needs to be positive.")
    }
    out$grainsize <- grainsize
  }
  out$static <- as_one_logical(static)
  out
}

is.brmsthreads <- function(x) {
  inherits(x, "brmsthreads")
}

# validate 'threads' argument
validate_threads <- function(threads) {
  if (is.null(threads)) {
    threads <- threading()
  } else if (is.numeric(threads)) {
    threads <- as_one_numeric(threads)
    threads <- threading(threads)
  } else if (!is.brmsthreads(threads)) {
    stop2("Argument 'threads' needs to be numeric or ",
          "specified via the 'threading' function.")
  }
  threads
}

# is threading activated?
use_threading <- function(threads) {
  isTRUE(validate_threads(threads)$threads > 0)
}
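# --- Illustrative sketch (not part of brms) --------------------------------
# The default grainsize documented above is chosen as max(100, N / (2 * threads)).
# A hypothetical helper making that formula explicit; for example, with
# N = 10000 observations and threads = 4 it yields max(100, 1250) = 1250.
.sketch_default_grainsize <- function(N, threads) {
  max(100, N / (2 * threads))
}
# ----------------------------------------------------------------------------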
#' GPU support in Stan via OpenCL
#'
#' Use OpenCL for GPU support in \pkg{Stan} via the \pkg{brms} interface. Only
#' some \pkg{Stan} functions can be run on a GPU at this point and so
#' a lot of \pkg{brms} models won't benefit from OpenCL for now.
#'
#' @param ids (integer vector of length 2) The platform and device IDs of the
#'   OpenCL device to use for fitting. If you don't know the IDs of your OpenCL
#'   device, \code{c(0,0)} is most likely what you need.
#'
#' @return A \code{brmsopencl} object which can be passed to the
#'   \code{opencl} argument of \code{brm} and related functions.
#'
#' @details For more details on OpenCL in \pkg{Stan}, check out
#'   \url{https://mc-stan.org/docs/2_26/cmdstan-guide/parallelization.html#opencl}
#'   as well as \url{https://mc-stan.org/docs/2_26/stan-users-guide/opencl.html}.
#'
#' @examples
#' \dontrun{
#' # this model just serves as an illustration
#' # OpenCL may not actually speed things up here
#' fit <- brm(count ~ zAge + zBase * Trt + (1|patient),
#'            data = epilepsy, family = poisson(),
#'            chains = 2, cores = 2, opencl = opencl(c(0, 0)),
#'            backend = "cmdstanr")
#' summary(fit)
#' }
#'
#' @export
opencl <- function(ids = NULL) {
  out <- list(ids = NULL)
  class(out) <- "brmsopencl"
  if (!is.null(ids)) {
    ids <- as.integer(ids)
    if (!length(ids) == 2L) {
      stop2("OpenCL 'ids' needs to be an integer vector of length 2.")
    }
    out$ids <- ids
  }
  out
}

is.brmsopencl <- function(x) {
  inherits(x, "brmsopencl")
}

# validate the 'opencl' argument
validate_opencl <- function(opencl) {
  if (is.null(opencl)) {
    opencl <- opencl()
  } else if (is.numeric(opencl)) {
    opencl <- opencl(opencl)
  } else if (!is.brmsopencl(opencl)) {
    stop2("Argument 'opencl' needs to be an integer vector or ",
          "specified via the 'opencl' function.")
  }
  opencl
}

# is OpenCL activated?
use_opencl <- function(opencl) {
  !is.null(validate_opencl(opencl)$ids)
}

# validate the 'silent' argument
validate_silent <- function(silent) {
  silent <- as_one_integer(silent)
  if (silent < 0 || silent > 2) {
    stop2("'silent' must be between 0 and 2.")
  }
  silent
}

# repair parameter names of stanfit objects
repair_stanfit_names <- function(x) {
  stopifnot(is.stanfit(x))
  if (!length(x@sim$fnames_oi)) {
    # nothing to rename
    return(x)
  }
  # the posterior package cannot deal with non-unique parameter names
  # this case happens rarely but might happen when sample_prior = "yes"
  x@sim$fnames_oi <- make.unique(as.character(x@sim$fnames_oi), "__")
  for (i in seq_along(x@sim$samples)) {
    # rstan::read_stan_csv may have renamed dimension suffixes (#1218)
    if (length(x@sim$samples[[i]]) == length(x@sim$fnames_oi)) {
      names(x@sim$samples[[i]]) <- x@sim$fnames_oi
    }
  }
  x
}

# possible options for argument 'file_refit'
file_refit_options <- function() {
  c("never", "always", "on_change")
}
brms/R/restructure.R0000644000175000017500000005614414111751666014274 0ustar nileshnilesh#' Restructure Old \code{brmsfit} Objects
#'
#' Restructure old \code{brmsfit} objects to work with
#' the latest \pkg{brms} version. This function is called
#' internally when applying post-processing methods.
#' However, in order to avoid unnecessary run time caused
#' by the restructuring, I recommend explicitly calling
#' \code{restructure} once per model after updating \pkg{brms}.
#'
#' @param x An object of class \code{brmsfit}.
#' @param ... Currently ignored.
#'
#' @return A \code{brmsfit} object compatible with the latest version
#'   of \pkg{brms}.
#'
#' @export
restructure <- function(x, ...)
{ stopifnot(is.brmsfit(x)) if (is.null(x$version)) { # this is the latest version without saving the version number x$version <- list(brms = package_version("0.9.1")) } else if (is.package_version(x$version)) { # also added the rstan version in brms 1.5.0 x$version <- list(brms = x$version) } current_version <- utils::packageVersion("brms") restr_version <- restructure_version(x) if (restr_version >= current_version) { # object is up to date with the current brms version return(x) } if (restr_version < "2.0.0") { x <- restructure_v1(x) } if (restr_version < "3.0.0") { x <- restructure_v2(x) } # remember the version with which the object was restructured x$version$restructure <- current_version # remove unused attribute attr(x, "restructured") <- NULL x } restructure_v2 <- function(x) { # restructure models fitted with brms 2.x x$formula <- update_old_family(x$formula) bterms <- SW(brmsterms(x$formula)) pars <- variables(x) version <- restructure_version(x) if (version < "2.1.2") { x <- do_renaming(x, change_old_bsp(pars)) } if (version < "2.1.3") { if ("weibull" %in% family_names(x)) { stop_parameterization_changed("weibull", "2.1.3") } } if (version < "2.1.8") { if ("exgaussian" %in% family_names(x)) { stop_parameterization_changed("exgaussian", "2.1.8") } } if (version < "2.1.9") { # reworked 'me' terms (#372) meef <- tidy_meef(bterms, model.frame(x)) if (isTRUE(nrow(meef) > 0)) { warning2( "Measurement error ('me') terms have been reworked ", "in version 2.1.9. I strongly recommend refitting your ", "model with the latest version of brms." ) } } if (version < "2.2.4") { # added 'dist' argument to grouping terms x$ranef <- tidy_ranef(bterms, model.frame(x)) } if (version < "2.3.7") { check_old_nl_dpars(bterms) } if (version < "2.8.3") { # argument 'sparse' is now specified within 'formula' sparse <- if (grepl("sparse matrix", stancode(x))) TRUE x$formula <- SW(validate_formula(x$formula, data = x$data, sparse = sparse)) } if (version < "2.8.4") { x <- rescale_old_mo(x) } if (version < "2.8.5") { if (any(grepl("^arr(\\[|_|$)", pars))) { warning2("ARR structures are no longer supported.") } } if (version < "2.8.6") { # internal handling of special effects terms has changed # this requires updating the 'terms' attribute of the data x$data <- rm_attr(x$data, c("brmsframe", "terms")) x$data <- validate_data(x$data, bterms) } if (version < "2.8.9") { if (any(grepl("^loclev(\\[|_|$)", pars))) { warning2("BSTS structures are no longer supported.") } } if (version < "2.10.4") { # model fit criteria have been moved to x$criteria criterion_names <- c("loo", "waic", "kfold", "R2", "marglik") criteria <- x[intersect(criterion_names, names(x))] x[criterion_names] <- NULL # rename 'R2' to 'bayes_R2' according to #793 names(criteria)[names(criteria) == "R2"] <- "bayes_R2" x$criteria <- criteria } if (version < "2.10.5") { # new slot 'thres' stored inside ordinal families if (is_ordinal(x$formula)) { x$formula <- SW(validate_formula(x$formula, data = x$data)) } } if (version < "2.11.2") { # 'autocor' was integrated into the formula interface x$formula <- SW(validate_formula(x$formula)) x$data2 <- validate_data2( data2 = list(), bterms = bterms, get_data2_autocor(x$formula) ) } if (version < "2.11.3") { # ordering after IDs matches the order of the posterior draws # if multiple IDs are used for the same grouping factor (#835) x$ranef <- x$ranef[order(x$ranef$id), , drop = FALSE] } if (version < "2.11.5") { # 'cats' is stored inside ordinal families again if (is_ordinal(x$formula)) { x$formula <- 
        SW(validate_formula(x$formula, data = x$data))
    }
  }
  if (version < "2.12.5") {
    # 'cov_ranef' was integrated into the formula interface
    if (length(x$cov_ranef)) {
      x$formula <- SW(validate_formula(x$formula, cov_ranef = x$cov_ranef))
      cov_ranef <- get_data2_cov_ranef(x$formula)
      x$data2[names(cov_ranef)] <- cov_ranef
    }
  }
  if (version < "2.12.6") {
    # minor structural changes as part of internal interface improvements
    attr(x$data, "data_name") <- x$data.name
    x$stanvars <- SW(validate_stanvars(x$stanvars, stan_funs = x$stan_funs))
  }
  if (version < "2.12.11") {
    # argument 'position' was added to stanvars
    for (i in seq_along(x$stanvars)) {
      x$stanvars[[i]]$position <- "start"
    }
  }
  if (version < "2.13.2") {
    # added support for 'cmdstanr' as additional backend
    x$backend <- "rstan"
  }
  if (version < "2.13.5") {
    # see issue #962 for discussion
    if ("cox" %in% family_names(x)) {
      stop_parameterization_changed("cox", "2.13.5")
    }
  }
  if (version < "2.13.8") {
    x$prior$source <- ""
    # ensure correct ordering of columns
    cols_prior <- intersect(all_cols_prior(), names(x$prior))
    x$prior <- x$prior[, cols_prior]
  }
  if (version < "2.13.10") {
    # added support for threading
    x$threads <- threading()
  }
  if (version < "2.13.12") {
    # added more control over which parameters to save
    save_ranef <- isTRUE(attr(x$exclude, "save_ranef"))
    save_mevars <- isTRUE(attr(x$exclude, "save_mevars"))
    save_all_pars <- isTRUE(attr(x$exclude, "save_all_pars"))
    x$save_pars <- SW(validate_save_pars(
      save_pars(), save_ranef = save_ranef,
      save_mevars = save_mevars, save_all_pars = save_all_pars
    ))
    x$exclude <- NULL
  }
  if (version < "2.15.6") {
    # added support for OpenCL
    x$opencl <- opencl()
  }
  if (version < "2.16.1") {
    # problems with rstan::read_stan_csv as well as
    # non-unique variable names became apparent (#1218)
    x$fit <- repair_stanfit_names(x$fit)
  }
  x
}

# restructure models fitted with brms 1.x
restructure_v1 <- function(x) {
  version <- restructure_version(x)
  if (version < "1.0.0") {
    warning2(
      "Models fitted with brms < 1.0 are no longer officially ",
      "supported and post-processing them may fail. I recommend ",
      "refitting the model with the latest version of brms."
) } x$formula <- restructure_formula_v1(formula(x), x$nonlinear) x$formula <- SW(validate_formula( formula(x), data = model.frame(x), family = family(x), autocor = x$autocor, threshold = x$threshold )) x$nonlinear <- x$partial <- x$threshold <- NULL bterms <- brmsterms(formula(x)) x$data <- rm_attr(x$data, "brmsframe") x$data <- validate_data(x$data, bterms) x$ranef <- tidy_ranef(bterms, model.frame(x)) if ("prior_frame" %in% class(x$prior)) { class(x$prior) <- c("brmsprior", "data.frame") } if (is(x$autocor, "cov_fixed")) { # deprecated as of brms 1.4.0 class(x$autocor) <- "cor_fixed" } if (version < "0.10.1") { if (length(bterms$dpars$mu$nlpars)) { # nlpar and group have changed positions change <- change_old_re(x$ranef, variables(x), x$fit@sim$dims_oi) x <- do_renaming(x, change) } } if (version < "1.0.0") { # double underscores were added to group-level parameters change <- change_old_re2(x$ranef, variables(x), x$fit@sim$dims_oi) x <- do_renaming(x, change) } if (version < "1.0.1.1") { # names of spline parameters had to be changed after # allowing for multiple covariates in one spline term change <- change_old_sm( bterms, model.frame(x), variables(x), x$fit@sim$dims_oi ) x <- do_renaming(x, change) } if (version < "1.8.0.1") { att <- attributes(x$exclude) if (is.null(att$save_ranef)) { attr(x$exclude, "save_ranef") <- any(grepl("^r_", variables(x))) || !nrow(x$ranef) } if (is.null(att$save_mevars)) { attr(x$exclude, "save_mevars") <- any(grepl("^Xme_", variables(x))) } } if (version < "1.8.0.2") { x$prior$resp <- x$prior$dpar <- "" # ensure correct ordering of columns cols_prior <- intersect(all_cols_prior(), names(x$prior)) x$prior <- x$prior[, cols_prior] } if (version < "1.9.0.4") { # names of monotonic parameters had to be changed after # allowing for interactions in monotonic terms change <- change_old_mo(bterms, x$data, pars = variables(x)) x <- do_renaming(x, change) } if (version >= "1.0.0" && version < "2.0.0") { change <- change_old_categorical(bterms, x$data, pars = variables(x)) x <- do_renaming(x, change) } x } # get version with which a brmsfit object was restructured restructure_version <- function(x) { stopifnot(is.brmsfit(x)) out <- x$version$restructure if (!is.package_version(out)) { # models restructured with brms 2.11.1 store it as an attribute out <- attr(x, "restructured", exact = TRUE) } if (!is.package_version(out)) { out <- x$version$brms } out } # convert old model formulas to brmsformula objects restructure_formula_v1 <- function(formula, nonlinear = NULL) { if (is.brmsformula(formula) && is.formula(formula)) { # convert deprecated brmsformula objects back to formula class(formula) <- "formula" } if (is.brmsformula(formula)) { # already up to date return(formula) } old_nonlinear <- attr(formula, "nonlinear") nl <- length(nonlinear) > 0 if (is.logical(old_nonlinear)) { nl <- nl || old_nonlinear } else if (length(old_nonlinear)) { nonlinear <- c(nonlinear, old_nonlinear) nl <- TRUE } out <- structure(nlist(formula), class = "brmsformula") old_forms <- rmNULL(attributes(formula)[old_dpars()]) old_forms <- c(old_forms, nonlinear) out$pforms[names(old_forms)] <- old_forms bf(out, nl = nl) } # parameters to be restructured in old brmsformula objects old_dpars <- function() { c("mu", "sigma", "shape", "nu", "phi", "kappa", "beta", "xi", "zi", "hu", "zoi", "coi", "disc", "bs", "ndt", "bias", "quantile", "alpha", "theta") } # interchanges group and nlpar in names of group-level parameters # required for brms <= 0.10.0.9000 # @param ranef output of tidy_ranef # 
@param pars names of all parameters in the model # @param dims dimension of parameters # @return a list whose elements can be interpreted by do_renaming change_old_re <- function(ranef, pars, dims) { out <- list() for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) g <- r$group[1] nlpar <- r$nlpar[1] stopifnot(nzchar(nlpar)) # rename sd-parameters old_sd_names <- paste0("sd_", nlpar, "_", g, "_", r$coef) new_sd_names <- paste0("sd_", g, "_", nlpar, "_", r$coef) for (i in seq_along(old_sd_names)) { lc(out) <- change_simple( old_sd_names[i], new_sd_names[i], pars, dims ) } # rename cor-parameters new_cor_names <- get_cornames( paste0(nlpar, "_", r$coef), type = paste0("cor_", g), brackets = FALSE, sep = "_" ) old_cor_names <- get_cornames( r$coef, brackets = FALSE, sep = "_", type = paste0("cor_", nlpar, "_", g) ) for (i in seq_along(old_cor_names)) { lc(out) <- change_simple( old_cor_names[i], new_cor_names[i], pars, dims ) } # rename r-parameters old_r_name <- paste0("r_", nlpar, "_", g) new_r_name <- paste0("r_", g, "_", nlpar) levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[g]]) index_names <- make_index_names(levels, r$coef, dim = 2) new_r_names <- paste0(new_r_name, index_names) lc(out) <- change_simple( old_r_name, new_r_names, pars, dims, pnames = new_r_name ) } out } # add double underscore in group-level parameters # required for brms < 1.0.0 # @note assumes that group and nlpar are correctly ordered already # @param ranef output of tidy_ranef # @param pars names of all parameters in the model # @param dims dimension of parameters # @return a list whose elements can be interpreted by do_renaming change_old_re2 <- function(ranef, pars, dims) { out <- list() for (id in unique(ranef$id)) { r <- subset2(ranef, id = id) g <- r$group[1] nlpars_usc <- usc(r$nlpar, "suffix") # rename sd-parameters old_sd_names <- paste0("sd_", g, "_", nlpars_usc, r$coef) new_sd_names <- paste0("sd_", g, "__", nlpars_usc, r$coef) for (i in seq_along(old_sd_names)) { lc(out) <- change_simple(old_sd_names[i], new_sd_names[i], pars, dims) } # rename cor-parameters new_cor_names <- get_cornames( paste0(nlpars_usc, r$coef), type = paste0("cor_", g), brackets = FALSE ) old_cor_names <- get_cornames( paste0(nlpars_usc, r$coef), type = paste0("cor_", g), brackets = FALSE, sep = "_" ) for (i in seq_along(old_cor_names)) { lc(out) <- change_simple(old_cor_names[i], new_cor_names[i], pars, dims) } # rename r-parameters for (nlpar in unique(r$nlpar)) { sub_r <- r[r$nlpar == nlpar, ] old_r_name <- paste0("r_", g, usc(nlpar)) new_r_name <- paste0("r_", g, usc(usc(nlpar))) levels <- gsub("[ \t\r\n]", ".", attr(ranef, "levels")[[g]]) index_names <- make_index_names(levels, sub_r$coef, dim = 2) new_r_names <- paste0(new_r_name, index_names) lc(out) <- change_simple( old_r_name, new_r_names, pars, dims, pnames = new_r_name ) } } out } # change names of spline parameters fitted with brms <= 1.0.1 # this became necessary after allowing smooths with multiple covariates change_old_sm <- function(bterms, data, pars, dims) { .change_old_sm <- function(bt) { out <- list() smef <- tidy_smef(bt, data) if (nrow(smef)) { p <- usc(combine_prefix(bt), "suffix") old_smooths <- rename(paste0(p, smef$term)) new_smooths <- rename(paste0(p, smef$label)) old_sds_pars <- paste0("sds_", old_smooths) new_sds_pars <- paste0("sds_", new_smooths, "_1") old_s_pars <- paste0("s_", old_smooths) new_s_pars <- paste0("s_", new_smooths, "_1") for (i in seq_along(old_smooths)) { lc(out) <- change_simple(old_sds_pars[i], new_sds_pars[i], 
pars, dims) dim_s <- dims[[old_s_pars[i]]] if (!is.null(dim_s)) { new_s_par_indices <- paste0(new_s_pars[i], "[", seq_len(dim_s), "]") lc(out) <- change_simple( old_s_pars[i], new_s_par_indices, pars, dims, pnames = new_s_pars[i] ) } } } return(out) } out <- list() if (is.mvbrmsterms(bterms)) { for (r in bterms$responses) { c(out) <- .change_old_sm(bterms$terms[[r]]$dpars$mu) } } else if (is.brmsterms(bterms)) { for (dp in names(bterms$dpars)) { bt <- bterms$dpars[[dp]] if (length(bt$nlpars)) { for (nlp in names(bt$nlpars)) { c(out) <- .change_old_sm(bt$nlpars[[nlp]]) } } else { c(out) <- .change_old_sm(bt) } } } out } # change names of monotonic effects fitted with brms <= 1.9.0 # this became necessary after implementing monotonic interactions change_old_mo <- function(bterms, data, pars) { .change_old_mo <- function(bt) { out <- list() spef <- tidy_spef(bt, data) has_mo <- lengths(spef$calls_mo) > 0 if (!any(has_mo)) { return(out) } spef <- spef[has_mo, ] p <- usc(combine_prefix(bt)) bmo_prefix <- paste0("bmo", p, "_") bmo_regex <- paste0("^", bmo_prefix, "[^_]+$") bmo_old <- pars[grepl(bmo_regex, pars)] bmo_new <- paste0(bmo_prefix, spef$coef) if (length(bmo_old) != length(bmo_new)) { stop2("Restructuring failed. Please refit your ", "model with the latest version of brms.") } for (i in seq_along(bmo_old)) { pos <- grepl(paste0("^", bmo_old[i]), pars) lc(out) <- clist(pos, fnames = bmo_new[i]) } simo_regex <- paste0("^simplex", p, "_[^_]+$") simo_old_all <- pars[grepl(simo_regex, pars)] simo_index <- get_matches("\\[[[:digit:]]+\\]$", simo_old_all) simo_old <- unique(sub("\\[[[:digit:]]+\\]$", "", simo_old_all)) simo_coef <- get_simo_labels(spef) for (i in seq_along(simo_old)) { regex_pos <- paste0("^", simo_old[i]) pos <- grepl(regex_pos, pars) simo_new <- paste0("simo", p, "_", simo_coef[i]) simo_index_part <- simo_index[grepl(regex_pos, simo_old_all)] simo_new <- paste0(simo_new, simo_index_part) lc(out) <- clist(pos, fnames = simo_new) } return(out) } out <- list() if (is.mvbrmsterms(bterms)) { for (r in bterms$responses) { c(out) <- .change_old_mo(bterms$terms[[r]]$dpars$mu) } } else if (is.brmsterms(bterms)) { for (dp in names(bterms$dpars)) { bt <- bterms$dpars[[dp]] if (length(bt$nlpars)) { for (nlp in names(bt$nlpars)) { c(out) <- .change_old_mo(bt$nlpars[[nlp]]) } } else { c(out) <- .change_old_mo(bt) } } } out } # between version 1.0 and 2.0 categorical models used # the internal multivariate interface change_old_categorical <- function(bterms, data, pars) { stopifnot(is.brmsterms(bterms)) if (!is_categorical(bterms$family)) { return(list()) } # compute the old category names respform <- bterms$respform old_dpars <- model.response(model.frame(respform, data = data)) old_dpars <- levels(factor(old_dpars)) old_dpars <- make.names(old_dpars[-1], unique = TRUE) old_dpars <- rename(old_dpars, ".", "x") new_dpars <- bterms$family$dpars stopifnot(length(old_dpars) == length(new_dpars)) pos <- rep(FALSE, length(pars)) new_pars <- pars for (i in seq_along(old_dpars)) { # not perfectly save but hopefully mostly correct regex <- paste0("(?<=_)", old_dpars[i], "(?=_|\\[)") pos <- pos | grepl(regex, pars, perl = TRUE) new_pars <- gsub(regex, new_dpars[i], new_pars, perl = TRUE) } list(nlist(pos, fnames = new_pars[pos])) } # as of brms 2.2 'mo' and 'me' terms are handled together change_old_bsp <- function(pars) { pos <- grepl("^(bmo|bme)_", pars) if (!any(pos)) return(list()) fnames <- gsub("^(bmo|bme)_", "bsp_", pars[pos]) list(nlist(pos, fnames)) } # prepare for renaming of parameters 
in old models change_simple <- function(oldname, fnames, pars, dims, pnames = fnames) { pos <- grepl(paste0("^", oldname), pars) if (any(pos)) { out <- nlist(pos, oldname, pnames, fnames, dims = dims[[oldname]]) class(out) <- c("clist", "list") } else { out <- NULL } out } # rescale old 'b' coefficients of monotonic effects # to represent average instead of total differences rescale_old_mo <- function(x, ...) { UseMethod("rescale_old_mo") } #' @export rescale_old_mo.brmsfit <- function(x, ...) { bterms <- brmsterms(x$formula) rescale_old_mo(bterms, fit = x, ...) } #' @export rescale_old_mo.mvbrmsterms <- function(x, fit, ...) { for (resp in x$responses) { fit <- rescale_old_mo(x$terms[[resp]], fit = fit, ...) } fit } #' @export rescale_old_mo.brmsterms <- function(x, fit, ...) { for (dp in names(x$dpars)) { fit <- rescale_old_mo(x$dpars[[dp]], fit = fit, ...) } for (nlp in names(x$nlpars)) { fit <- rescale_old_mo(x$nlpars[[nlp]], fit = fit, ...) } fit } #' @export rescale_old_mo.btnl <- function(x, fit, ...) { fit } #' @export rescale_old_mo.btl <- function(x, fit, ...) { spef <- tidy_spef(x, fit$data) has_mo <- lengths(spef$Imo) > 0L if (!any(has_mo)) { return(fit) } warning2( "The parameterization of monotonic effects has changed in brms 2.8.4 ", "so that corresponding 'b' coefficients now represent average instead ", "of total differences between categories. See vignette('brms_monotonic') ", "for more details. Parameters of old models are adjusted automatically." ) p <- combine_prefix(x) all_pars <- variables(fit) chains <- fit$fit@sim$chains for (i in which(has_mo)) { bsp_par <- paste0("bsp", p, "_", spef$coef[i]) simo_regex <- paste0(spef$coef[i], seq_along(spef$Imo[[i]])) simo_regex <- paste0("simo", p, "_", simo_regex, "[") simo_regex <- paste0("^", escape_all(simo_regex)) # scaling factor by which to divide the old 'b' coefficients D <- prod(ulapply(simo_regex, function(r) sum(grepl(r, all_pars)))) for (j in seq_len(chains)) { fit$fit@sim$samples[[j]][[bsp_par]] <- fit$fit@sim$samples[[j]][[bsp_par]] / D } } fit } # update old families to work with the latest brms version update_old_family <- function(x, ...) { UseMethod("update_old_family") } #' @export update_old_family.default <- function(x, ...) { validate_family(x) } #' @export update_old_family.brmsfamily <- function(x, ...) { # new specials may have been added in new brms versions family_info <- get(paste0(".family_", x$family))() x$specials <- family_info$specials x } #' @export update_old_family.customfamily <- function(x, ...) { if (!is.null(x$predict)) { x$posterior_predict <- x$predict x$predict <- NULL } if (!is.null(x$fitted)) { x$posterior_epred <- x$fitted x$fitted <- NULL } x } #' @export update_old_family.mixfamily <- function(x, ...) { x$mix <- lapply(x$mix, update_old_family, ...) x } #' @export update_old_family.brmsformula <- function(x, ...) { x$family <- update_old_family(x$family, ...) x } #' @export update_old_family.mvbrmsformula <- function(x, ...) { x$forms <- lapply(x$forms, update_old_family, ...) x } stop_parameterization_changed <- function(family, version) { stop2( "The parameterization of '", family, "' models has changed in brms ", version, ". Please refit your model with the current version of brms." ) } check_old_nl_dpars <- function(bterms) { .check_nl_dpars <- function(x) { stopifnot(is.brmsterms(x)) non_mu_dpars <- x$dpars[names(x$dpars) != "mu"] if (any(ulapply(non_mu_dpars, is.btnl))) { stop2( "Non-linear parameters are global within univariate models ", "as of version 2.3.7. 
Please refit your model with the ", "latest version of brms." ) } return(TRUE) } if (is.mvbrmsterms(bterms)) { lapply(bterms$terms, .check_nl_dpars) } else { .check_nl_dpars(bterms) } TRUE } brms/R/brmsfit-methods.R0000644000175000017500000004512014111751665015013 0ustar nileshnilesh# This file contains several extractor methods for brmsfit objects. # A lot of other brmsfit methods have their own dedicated files. #' Extract Population-Level Estimates #' #' Extract the population-level ('fixed') effects #' from a \code{brmsfit} object. #' #' @aliases fixef #' #' @inheritParams predict.brmsfit #' @param pars Optional names of coefficients to extract. #' By default, all coefficients are extracted. #' @param ... Currently ignored. #' #' @return If \code{summary} is \code{TRUE}, a matrix returned #' by \code{\link{posterior_summary}} for the population-level effects. #' If \code{summary} is \code{FALSE}, a matrix with one row per #' posterior draw and one column per population-level effect. #' #' @examples #' \dontrun{ #' fit <- brm(time | cens(censored) ~ age + sex + disease, #' data = kidney, family = "exponential") #' fixef(fit) #' # extract only some coefficients #' fixef(fit, pars = c("age", "sex")) #' } #' #' @method fixef brmsfit #' @export #' @export fixef #' @importFrom nlme fixef fixef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, ...) { contains_draws(object) all_pars <- variables(object) fpars <- all_pars[grepl(fixef_pars(), all_pars)] if (!is.null(pars)) { pars <- as.character(pars) fpars <- fpars[sub("^[^_]+_", "", fpars) %in% pars] } if (!length(fpars)) { return(NULL) } out <- as.matrix(object, variable = fpars) colnames(out) <- gsub(fixef_pars(), "", fpars) if (summary) { out <- posterior_summary(out, probs, robust) } out } #' Covariance and Correlation Matrix of Population-Level Effects #' #' Get a point estimate of the covariance or #' correlation matrix of population-level parameters #' #' @inheritParams fixef.brmsfit #' @param correlation Logical; if \code{FALSE} (the default), compute #' the covariance matrix, if \code{TRUE}, compute the correlation matrix. #' #' @return covariance or correlation matrix of population-level parameters #' #' @details Estimates are obtained by calculating the maximum likelihood #' covariances (correlations) of the posterior draws. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' vcov(fit) #' } #' #' @export vcov.brmsfit <- function(object, correlation = FALSE, pars = NULL, ...) { contains_draws(object) all_pars <- variables(object) fpars <- all_pars[grepl(fixef_pars(), all_pars)] if (!is.null(pars)) { pars <- as.character(pars) fpars <- intersect(fpars, paste0("b_", pars)) } if (!length(fpars)) { return(NULL) } draws <- as.data.frame(object, variable = fpars) names(draws) <- sub(fixef_pars(), "", names(draws)) if (correlation) { out <- cor(draws) } else { out <- cov(draws) } out } #' Extract Group-Level Estimates #' #' Extract the group-level ('random') effects of each level #' from a \code{brmsfit} object. #' #' @aliases ranef #' #' @inheritParams fixef.brmsfit #' @param groups Optional names of grouping variables #' for which to extract effects. #' @param ... Currently ignored. #' #' @return A list of 3D arrays (one per grouping factor). 
#' If \code{summary} is \code{TRUE}, #' the 1st dimension contains the factor levels, #' the 2nd dimension contains the summary statistics #' (see \code{\link{posterior_summary}}), and #' the 3rd dimension contains the group-level effects. #' If \code{summary} is \code{FALSE}, the 1st dimension contains #' the posterior draws, the 2nd dimension contains the factor levels, #' and the 3rd dimension contains the group-level effects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' ranef(fit) #' } #' #' @method ranef brmsfit #' @export #' @export ranef #' @importFrom nlme ranef ranef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), pars = NULL, groups = NULL, ...) { contains_draws(object) object <- restructure(object) if (!nrow(object$ranef)) { stop2("The model does not contain group-level effects.") } all_pars <- variables(object) if (!is.null(pars)) { pars <- as.character(pars) } ranef <- object$ranef all_groups <- unique(ranef$group) if (!is.null(groups)) { groups <- as.character(groups) all_groups <- intersect(all_groups, groups) } out <- named_list(all_groups) for (g in all_groups) { r <- subset2(ranef, group = g) coefs <- paste0(usc(combine_prefix(r), "suffix"), r$coef) rpars <- all_pars[grepl(paste0("^r_", g, "(__.+\\[|\\[)"), all_pars)] if (!is.null(pars)) { coefs <- coefs[r$coef %in% pars] if (!length(coefs)) { next } regex <- paste0("(", escape_all(coefs), ")", collapse = "|") regex <- paste0(",", regex, "\\]$") rpars <- rpars[grepl(regex, rpars)] } out[[g]] <- as.matrix(object, variable = rpars) levels <- attr(ranef, "levels")[[g]] dim(out[[g]]) <- c(nrow(out[[g]]), length(levels), length(coefs)) dimnames(out[[g]])[2:3] <- list(levels, coefs) if (summary) { out[[g]] <- posterior_summary(out[[g]], probs, robust) } } rmNULL(out, recursive = FALSE) } #' Extract Model Coefficients #' #' Extract model coefficients, which are the sum of population-level #' effects and corresponding group-level effects #' #' @inheritParams ranef.brmsfit #' @param ... Further arguments passed to \code{\link{fixef.brmsfit}} #' and \code{\link{ranef.brmsfit}}. #' #' @return A list of 3D arrays (one per grouping factor). #' If \code{summary} is \code{TRUE}, #' the 1st dimension contains the factor levels, #' the 2nd dimension contains the summary statistics #' (see \code{\link{posterior_summary}}), and #' the 3rd dimension contains the group-level effects. #' If \code{summary} is \code{FALSE}, the 1st dimension contains #' the posterior draws, the 2nd dimension contains the factor levels, #' and the 3rd dimension contains the group-level effects. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' ## extract population and group-level coefficients separately #' fixef(fit) #' ranef(fit) #' ## extract combined coefficients #' coef(fit) #' } #' #' @export coef.brmsfit <- function(object, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { contains_draws(object) object <- restructure(object) if (!nrow(object$ranef)) { stop2("No group-level effects detected. Call method ", "'fixef' to access population-level effects.") } fixef <- fixef(object, summary = FALSE, ...) coef <- ranef(object, summary = FALSE, ...) 
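# coef() combines the two sets of draws extracted above: group-level ('random')
# effects are added to the matching population-level ('fixed') effects draw by draw.
# The blocks below first align the coefficient names of both parts, padding
# coefficients that appear in only one part with zeros before the summation.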
# add missing coefficients to fixef all_ranef_names <- unique(ulapply(coef, function(x) dimnames(x)[[3]])) fixef_names <- colnames(fixef) fixef_no_digits <- get_matches("^[^\\[]+", fixef_names) miss_fixef <- setdiff(all_ranef_names, fixef_names) miss_fixef_no_digits <- get_matches("^[^\\[]+", miss_fixef) new_fixef <- named_list(miss_fixef) for (k in seq_along(miss_fixef)) { # digits occur in ordinal models with category specific effects match_fixef <- match(miss_fixef_no_digits[k], fixef_names) if (!is.na(match_fixef)) { new_fixef[[k]] <- fixef[, match_fixef] } else if (!miss_fixef[k] %in% fixef_no_digits) { new_fixef[[k]] <- 0 } } rm_fixef <- fixef_names %in% miss_fixef_no_digits fixef <- fixef[, !rm_fixef, drop = FALSE] fixef <- do_call(cbind, c(list(fixef), rmNULL(new_fixef))) for (g in names(coef)) { # add missing coefficients to ranef ranef_names <- dimnames(coef[[g]])[[3]] ranef_no_digits <- get_matches("^[^\\[]+", ranef_names) miss_ranef <- setdiff(fixef_names, ranef_names) miss_ranef_no_digits <- get_matches("^[^\\[]+", miss_ranef) new_ranef <- named_list(miss_ranef) for (k in seq_along(miss_ranef)) { # digits occur in ordinal models with category specific effects match_ranef <- match(miss_ranef_no_digits[k], ranef_names) if (!is.na(match_ranef)) { new_ranef[[k]] <- coef[[g]][, , match_ranef] } else if (!miss_ranef[k] %in% ranef_no_digits) { new_ranef[[k]] <- array(0, dim = dim(coef[[g]])[1:2]) } } rm_ranef <- ranef_names %in% miss_ranef_no_digits coef[[g]] <- coef[[g]][, , !rm_ranef, drop = FALSE] coef[[g]] <- abind(c(list(coef[[g]]), rmNULL(new_ranef))) for (nm in dimnames(coef[[g]])[[3]]) { is_ord_intercept <- grepl("(^|_)Intercept\\[[[:digit:]]+\\]$", nm) if (is_ord_intercept) { # correct the sign of thresholds in ordinal models resp <- if (is_mv(object)) get_matches("^[^_]+", nm) family <- family(object, resp = resp)$family if (has_thres_minus_eta(family)) { coef[[g]][, , nm] <- fixef[, nm] - coef[[g]][, , nm] } else if (has_eta_minus_thres(family)) { coef[[g]][, , nm] <- coef[[g]][, , nm] - fixef[, nm] } else { coef[[g]][, , nm] <- fixef[, nm] + coef[[g]][, , nm] } } else { coef[[g]][, , nm] <- fixef[, nm] + coef[[g]][, , nm] } } if (summary) { coef[[g]] <- posterior_summary(coef[[g]], probs, robust) } } coef } #' Extract Variance and Correlation Components #' #' This function calculates the estimated standard deviations, #' correlations and covariances of the group-level terms #' in a multilevel model of class \code{brmsfit}. #' For linear models, the residual standard deviations, #' correlations and covariances are also returned. #' #' @aliases VarCorr #' #' @param x An object of class \code{brmsfit}. #' @inheritParams fixef.brmsfit #' @param sigma Ignored (included for compatibility with #' \code{\link[nlme:VarCorr]{VarCorr}}). #' @param ... Currently ignored. #' #' @return A list of lists (one per grouping factor), each with #' three elements: a matrix containing the standard deviations, #' an array containing the correlation matrix, and an array #' containing the covariance matrix with variances on the diagonal. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt + (1+Trt|visit), #' data = epilepsy, family = gaussian(), chains = 2) #' VarCorr(fit) #' } #' #' @method VarCorr brmsfit #' @import abind abind #' @importFrom nlme VarCorr #' @export VarCorr #' @export VarCorr.brmsfit <- function(x, sigma = 1, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) 
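# Returns a named list with one element per grouping factor (plus a 'residual__'
# element where applicable), each holding draws or summaries of the standard
# deviations ('sd'), correlations ('cor'), and covariances ('cov').
# Minimal usage sketch, assuming the 'fit' object from the example above:
#   vc <- VarCorr(fit)
#   vc$visit$sd   # group-level standard deviations for the 'visit' factor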
{ contains_draws(x) x <- restructure(x) if (!(nrow(x$ranef) || any(grepl("^sigma($|_)", variables(x))))) { stop2("The model does not contain covariance matrices.") } .VarCorr <- function(y) { # extract draws for sd, cor and cov out <- list(sd = as.matrix(x, variable = y$sd_pars)) colnames(out$sd) <- y$rnames # compute correlation and covariance matrices found_cor_pars <- intersect(y$cor_pars, variables(x)) if (length(found_cor_pars)) { cor <- as.matrix(x, variable = found_cor_pars) if (length(found_cor_pars) < length(y$cor_pars)) { # some correlations are missing and will be replaced by 0 cor_all <- matrix(0, nrow = nrow(cor), ncol = length(y$cor_pars)) names(cor_all) <- y$cor_pars for (i in seq_len(ncol(cor_all))) { found <- match(names(cor_all)[i], colnames(cor)) if (!is.na(found)) { cor_all[, i] <- cor[, found] } } cor <- cor_all } out$cor <- get_cor_matrix(cor = cor) out$cov <- get_cov_matrix(sd = out$sd, cor = cor) dimnames(out$cor)[2:3] <- list(y$rnames, y$rnames) dimnames(out$cov)[2:3] <- list(y$rnames, y$rnames) if (summary) { out$cor <- posterior_summary(out$cor, probs, robust) out$cov <- posterior_summary(out$cov, probs, robust) } } if (summary) { out$sd <- posterior_summary(out$sd, probs, robust) } return(out) } if (nrow(x$ranef)) { get_names <- function(group) { # get names of group-level parameters r <- subset2(x$ranef, group = group) rnames <- as.vector(get_rnames(r)) cor_type <- paste0("cor_", group) sd_pars <- paste0("sd_", group, "__", rnames) cor_pars <- get_cornames(rnames, cor_type, brackets = FALSE) nlist(rnames, sd_pars, cor_pars) } group <- unique(x$ranef$group) tmp <- lapply(group, get_names) names(tmp) <- group } else { tmp <- list() } # include residual variances in the output as well bterms <- brmsterms(x$formula) if (is.brmsterms(bterms)) { if (simple_sigma(bterms) && !is.mixfamily(x$family)) { tmp_resid <- list(rnames = bterms$resp, sd_pars = "sigma") tmp <- c(tmp, residual__ = list(tmp_resid)) } } else if (is.mvbrmsterms(bterms)) { simple_sigma <- ulapply(bterms$terms, simple_sigma) pred_sigma <- ulapply(bterms$terms, pred_sigma) is_mix <- ulapply(x$family, is.mixfamily) if (any(simple_sigma) && !any(pred_sigma) && !any(is_mix)) { resps <- bterms$responses[simple_sigma] sd_pars <- paste0("sigma_", resps) if (bterms$rescor) { cor_pars <- get_cornames(resps, type = "rescor", brackets = FALSE) } else { cor_pars <- character(0) } tmp_resid <- nlist(rnames = resps, sd_pars, cor_pars) tmp <- c(tmp, residual__ = list(tmp_resid)) } } lapply(tmp, .VarCorr) } #' @export model.frame.brmsfit <- function(formula, ...) { formula$data } #' (Deprecated) Number of Posterior Samples #' #' Extract the number of posterior samples (draws) stored in a fitted Bayesian #' model. Method \code{nsamples} is deprecated. Please use \code{ndraws} #' instead. #' #' @aliases nsamples #' #' @param object An object of class \code{brmsfit}. #' @param subset An optional integer vector defining a subset of samples #' to be considered. #' @param incl_warmup A flag indicating whether to also count warmup / burn-in #' samples. #' @param ... Currently ignored. #' #' @method nsamples brmsfit #' @export #' @export nsamples #' @importFrom rstantools nsamples nsamples.brmsfit <- function(object, subset = NULL, incl_warmup = FALSE, ...) { warning2("'nsamples.brmsfit' is deprecated. 
Please use 'ndraws' instead.") if (!is(object$fit, "stanfit") || !length(object$fit@sim)) { out <- 0 } else { ntsamples <- object$fit@sim$n_save[1] if (!incl_warmup) { ntsamples <- ntsamples - object$fit@sim$warmup2[1] } ntsamples <- ntsamples * object$fit@sim$chains if (length(subset)) { out <- length(subset) if (out > ntsamples || max(subset) > ntsamples) { stop2("Argument 'subset' is invalid.") } } else { out <- ntsamples } } out } #' @export nobs.brmsfit <- function(object, resp = NULL, ...) { if (is_mv(object) && length(resp)) { resp <- validate_resp(resp, object, multiple = FALSE) bterms <- brmsterms(object$formula$forms[[resp]]) out <- nrow(subset_data(model.frame(object), bterms)) } else { out <- nrow(model.frame(object)) } out } #' Number of Grouping Factor Levels #' #' Extract the number of levels of one or more grouping factors. #' #' @aliases ngrps.brmsfit #' #' @param object An \R object. #' @param ... Currently ignored. #' #' @return A named list containing the number of levels per #' grouping factor. #' #' @export ngrps.brmsfit <- function(object, ...) { object <- restructure(object) if (nrow(object$ranef)) { out <- lapply(attr(object$ranef, "levels"), length) } else { out <- NULL } out } #' @rdname ngrps.brmsfit #' @export ngrps <- function(object, ...) { UseMethod("ngrps") } #' @export formula.brmsfit <- function(x, ...) { x$formula } #' @export getCall.brmsfit <- function(x, ...) { x$formula } #' Extract Model Family Objects #' #' @inheritParams posterior_predict.brmsfit #' @param ... Currently unused. #' #' @return A \code{brmsfamily} object #' or a list of such objects for multivariate models. #' #' @export family.brmsfit <- function(object, resp = NULL, ...) { resp <- validate_resp(resp, object) if (!is.null(resp)) { # multivariate model family <- lapply(object$formula$forms[resp], "[[", "family") if (length(resp) == 1L) { family <- family[[1]] } } else { # univariate model family <- object$formula$family if (is.null(family)) { family <- object$family } } family } #' Expose user-defined \pkg{Stan} functions #' #' Export user-defined \pkg{Stan} function and #' optionally vectorize them. For more details see #' \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. #' #' @param x An object of class \code{brmsfit}. #' @param vectorize Logical; Indicates if the exposed functions #' should be vectorized via \code{\link{Vectorize}}. #' Defaults to \code{FALSE}. #' @param env Environment where the functions should be made #' available. Defaults to the global environment. #' @param ... Further arguments passed to #' \code{\link[rstan:expose_stan_functions]{expose_stan_functions}}. #' #' @export expose_functions.brmsfit <- function(x, vectorize = FALSE, env = globalenv(), ...) { vectorize <- as_one_logical(vectorize) if (x$backend == "cmdstanr") { # cmdstanr does not yet support 'expose_stan_functions' itself (#1176) scode <- strsplit(stancode(x), "\n")[[1]] data_line <- grep("^data[ ]+\\{$", scode) scode <- paste0(c(scode[seq_len(data_line - 1)], "\n"), collapse = "\n") stanmodel <- tempfile(fileext = ".stan") cat(scode, file = stanmodel) } else { stanmodel <- x$fit } if (vectorize) { funs <- rstan::expose_stan_functions(stanmodel, env = environment(), ...) for (i in seq_along(funs)) { FUN <- Vectorize(get(funs[i], mode = "function")) assign(funs[i], FUN, pos = env) } } else { funs <- rstan::expose_stan_functions(stanmodel, env = env, ...) } invisible(funs) } #' @rdname expose_functions.brmsfit #' @export expose_functions <- function(x, ...) 
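# S3 generic dispatching to expose_functions.brmsfit() defined above.
# Usage sketch, assuming a fitted and compiled model object 'fit':
#   expose_functions(fit, vectorize = TRUE)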
{ UseMethod("expose_functions") } brms/R/loo.R0000644000175000017500000010114314111751666012474 0ustar nileshnilesh#' Efficient approximate leave-one-out cross-validation (LOO) #' #' Perform approximate leave-one-out cross-validation based #' on the posterior likelihood using the \pkg{loo} package. #' For more details see \code{\link[loo:loo]{loo}}. #' #' @aliases loo LOO LOO.brmsfit #' #' @param x A \code{brmsfit} object. #' @param ... More \code{brmsfit} objects or further arguments #' passed to the underlying post-processing functions. #' In particular, see \code{\link{prepare_predictions}} for further #' supported arguments. #' @param compare A flag indicating if the information criteria #' of the models should be compared to each other #' via \code{\link{loo_compare}}. #' @param pointwise A flag indicating whether to compute the full #' log-likelihood matrix at once or separately for each observation. #' The latter approach is usually considerably slower but #' requires much less working memory. Accordingly, if one runs #' into memory issues, \code{pointwise = TRUE} is the way to go. #' @param moment_match Logical; Indicate whether \code{\link{loo_moment_match}} #' should be applied on problematic observations. Defaults to \code{FALSE}. #' For most models, moment matching will only work if you have set #' \code{save_pars = save_pars(all = TRUE)} when fitting the model with #' \code{\link{brm}}. See \code{\link{loo_moment_match.brmsfit}} for more #' details. #' @param reloo Logical; Indicate whether \code{\link{reloo}} #' should be applied on problematic observations. Defaults to \code{FALSE}. #' @param k_threshold The threshold at which pareto \eqn{k} #' estimates are treated as problematic. Defaults to \code{0.7}. #' Only used if argument \code{reloo} is \code{TRUE}. #' See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} for more details. #' @param save_psis Should the \code{"psis"} object created internally be saved #' in the returned object? For more details see \code{\link[loo:loo]{loo}}. #' @param moment_match_args Optional \code{list} of additional arguments passed to #' \code{\link{loo_moment_match}}. #' @param reloo_args Optional \code{list} of additional arguments passed to #' \code{\link{reloo}}. #' @param model_names If \code{NULL} (the default) will use model names #' derived from deparsing the call. Otherwise will use the passed #' values as model names. #' @inheritParams predict.brmsfit #' #' @details See \code{\link{loo_compare}} for details on model comparisons. #' For \code{brmsfit} objects, \code{LOO} is an alias of \code{loo}. #' Use method \code{\link{add_criterion}} to store #' information criteria in the fitted model object for later usage. #' #' @return If just one object is provided, an object of class \code{loo}. #' If multiple objects are provided, an object of class \code{loolist}. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (loo1 <- loo(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (loo2 <- loo(fit2)) #' #' # compare both models #' loo_compare(loo1, loo2) #' } #' #' @references #' Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model #' evaluation using leave-one-out cross-validation and WAIC. In Statistics #' and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. #' #' Gelman, A., Hwang, J., & Vehtari, A. 
(2014). #' Understanding predictive information criteria for Bayesian models. #' Statistics and Computing, 24, 997-1016. #' #' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation #' and widely applicable information criterion in singular learning theory. #' The Journal of Machine Learning Research, 11, 3571-3594. #' #' @importFrom loo loo is.loo #' @export loo #' @export loo.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist( criterion = "loo", pointwise, compare, resp, k_threshold, save_psis, moment_match, reloo, moment_match_args, reloo_args ) do_call(compute_loolist, args) } #' @export LOO.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, moment_match = FALSE, reloo = FALSE, k_threshold = 0.7, save_psis = FALSE, moment_match_args = list(), reloo_args = list(), model_names = NULL) { cl <- match.call() cl[[1]] <- quote(loo) eval(cl, parent.frame()) } #' @export LOO <- function(x, ...) { UseMethod("LOO") } #' Widely Applicable Information Criterion (WAIC) #' #' Compute the widely applicable information criterion (WAIC) #' based on the posterior likelihood using the \pkg{loo} package. #' For more details see \code{\link[loo:waic]{waic}}. #' #' @aliases waic WAIC WAIC.brmsfit #' #' @inheritParams loo.brmsfit #' #' @details See \code{\link{loo_compare}} for details on model comparisons. #' For \code{brmsfit} objects, \code{WAIC} is an alias of \code{waic}. #' Use method \code{\link[brms:add_criterion]{add_criterion}} to store #' information criteria in the fitted model object for later usage. #' #' @return If just one object is provided, an object of class \code{loo}. #' If multiple objects are provided, an object of class \code{loolist}. #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (waic1 <- waic(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (waic2 <- waic(fit2)) #' #' # compare both models #' loo_compare(waic1, waic2) #' } #' #' @references #' Vehtari, A., Gelman, A., & Gabry J. (2016). Practical Bayesian model #' evaluation using leave-one-out cross-validation and WAIC. In Statistics #' and Computing, doi:10.1007/s11222-016-9696-4. arXiv preprint arXiv:1507.04544. #' #' Gelman, A., Hwang, J., & Vehtari, A. (2014). #' Understanding predictive information criteria for Bayesian models. #' Statistics and Computing, 24, 997-1016. #' #' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation #' and widely applicable information criterion in singular learning theory. #' The Journal of Machine Learning Research, 11, 3571-3594. #' #' @importFrom loo waic #' @export waic #' @export waic.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist(criterion = "waic", pointwise, compare, resp) do_call(compute_loolist, args) } #' @export WAIC.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, pointwise = FALSE, model_names = NULL) { cl <- match.call() cl[[1]] <- quote(waic) eval(cl, parent.frame()) } #' @export WAIC <- function(x, ...) 
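# S3 generic for the 'WAIC' alias; brmsfit objects are handled by
# WAIC.brmsfit() above, which simply forwards to waic().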
{ UseMethod("WAIC") } # helper function used to create (lists of) 'loo' objects # @param models list of brmsfit objects # @param criterion name of the criterion to compute # @param use_stored use precomputed criterion objects if possible? # @param compare compare models using 'loo_compare'? # @param ... more arguments passed to compute_loo # @return If length(models) > 1 an object of class 'loolist' # If length(models) == 1 an object of class 'loo' compute_loolist <- function(models, criterion, use_stored = TRUE, compare = TRUE, ...) { criterion <- match.arg(criterion, loo_criteria()) args <- nlist(criterion, ...) for (i in seq_along(models)) { models[[i]] <- restructure(models[[i]]) } if (length(models) > 1L) { if (!match_nobs(models)) { stop2("Models have different number of observations.") } if (length(use_stored) == 1L) { use_stored <- rep(use_stored, length(models)) } out <- list(loos = named_list(names(models))) for (i in seq_along(models)) { args$x <- models[[i]] args$model_name <- names(models)[i] args$use_stored <- use_stored[i] out$loos[[i]] <- do_call(compute_loo, args) } compare <- as_one_logical(compare) if (compare) { out$diffs <- loo_compare(out$loos) # for backwards compatibility; remove in brms 3.0 out$ic_diffs__ <- SW(compare_ic(x = out$loos))$ic_diffs__ } class(out) <- "loolist" } else { args$x <- models[[1]] args$model_name <- names(models) args$use_stored <- use_stored out <- do_call(compute_loo, args) } out } # compute model fit criteria using the 'loo' package # @param x an object of class brmsfit # @param criterion the criterion to be computed # @param newdata optional data.frame of new data # @param resp optional names of the predicted response variables # @param model_name original variable name of object 'x' # @param use_stored use precomputed criterion objects if possible? # @param ... passed to the individual methods # @return an object of class 'loo' compute_loo <- function(x, criterion, newdata = NULL, resp = NULL, model_name = "", use_stored = TRUE, ...) { criterion <- match.arg(criterion, loo_criteria()) model_name <- as_one_character(model_name) use_stored <- as_one_logical(use_stored) out <- get_criterion(x, criterion) if (!(use_stored && is.loo(out))) { args <- nlist(x, newdata, resp, model_name, ...) out <- do_call(paste0(".", criterion), args) attr(out, "yhash") <- hash_response(x, newdata = newdata, resp = resp) } attr(out, "model_name") <- model_name out } # possible criteria to evaluate via the loo package loo_criteria <- function() { c("loo", "waic", "psis", "kfold", "loo_subsample") } # compute 'loo' criterion using the 'loo' package .loo <- function(x, pointwise, k_threshold, moment_match, reloo, moment_match_args, reloo_args, newdata, resp, model_name, save_psis, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = pointwise, save_psis = save_psis, ... ) out <- SW(do_call("loo", loo_args, pkg = "loo")) if (moment_match) { c(moment_match_args) <- nlist( x, loo = out, newdata, resp, k_threshold, check = FALSE, ... ) out <- do_call("loo_moment_match", moment_match_args) } if (reloo) { c(reloo_args) <- nlist( x, loo = out, newdata, resp, k_threshold, check = FALSE, ... ) out <- do_call("reloo", reloo_args) } recommend_loo_options(out, k_threshold, moment_match, model_name) out } # compute 'waic' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .waic <- function(x, pointwise, newdata, resp, model_name, ...) 
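# The function body prepares pointwise log-likelihood draws via
# prepare_loo_args() and forwards them to loo::waic().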
{ loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = pointwise, ... ) do_call("waic", loo_args, pkg = "loo") } # compute 'psis' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .psis <- function(x, newdata, resp, model_name, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = FALSE, ... ) loo_args$log_ratios <- -loo_args$x loo_args$x <- NULL do_call("psis", loo_args, pkg = "loo") } # prepare arguments passed to the methods of the `loo` package prepare_loo_args <- function(x, newdata, resp, pointwise, ...) { pointwise <- as_one_logical(pointwise) loo_args <- list(...) ll_args <- nlist(object = x, newdata, resp, pointwise, ...) loo_args$x <- do_call(log_lik, ll_args) if (pointwise) { loo_args$draws <- attr(loo_args$x, "draws") loo_args$data <- attr(loo_args$x, "data") } # compute pointwise relative efficiencies r_eff_args <- loo_args r_eff_args$fit <- x loo_args$r_eff <- do_call(r_eff_log_lik, r_eff_args) loo_args } #' Model comparison with the \pkg{loo} package #' #' For more details see \code{\link[loo:loo_compare]{loo_compare}}. #' #' @aliases loo_compare #' #' @inheritParams loo.brmsfit #' @param ... More \code{brmsfit} objects. #' @param criterion The name of the criterion to be extracted #' from \code{brmsfit} objects. #' #' @details All \code{brmsfit} objects should contain precomputed #' criterion objects. See \code{\link{add_criterion}} for more help. #' #' @return An object of class "\code{compare.loo}". #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' fit1 <- add_criterion(fit1, "waic") #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' fit2 <- add_criterion(fit2, "waic") #' #' # compare both models #' loo_compare(fit1, fit2, criterion = "waic") #' } #' #' @importFrom loo loo_compare #' @export loo_compare #' @export loo_compare.brmsfit <- function(x, ..., criterion = c("loo", "waic", "kfold"), model_names = NULL) { criterion <- match.arg(criterion) models <- split_dots(x, ..., model_names = model_names, other = FALSE) loos <- named_list(names(models)) for (i in seq_along(models)) { models[[i]] <- restructure(models[[i]]) loos[[i]] <- get_criterion(models[[i]], criterion) if (is.null(loos[[i]])) { stop2( "Model '", names(models)[i], "' does not contain a precomputed '", criterion, "' criterion. See ?loo_compare.brmsfit for help." ) } } loo_compare(loos) } #' Model averaging via stacking or pseudo-BMA weighting. #' #' Compute model weights for \code{brmsfit} objects via stacking #' or pseudo-BMA weighting. For more details, see #' \code{\link[loo:loo_model_weights]{loo::loo_model_weights}}. #' #' @aliases loo_model_weights #' #' @inheritParams loo.brmsfit #' #' @return A named vector of model weights. 
#' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler, family = "gaussian") #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "gaussian") #' loo_model_weights(fit1, fit2) #' } #' #' @method loo_model_weights brmsfit #' @importFrom loo loo_model_weights #' @export loo_model_weights #' @export loo_model_weights.brmsfit <- function(x, ..., model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) models <- args$models args$models <- NULL log_lik_list <- lapply(models, function(x) do_call(log_lik, c(list(x), args)) ) args$x <- log_lik_list args$r_eff_list <- mapply( r_eff_log_lik, log_lik_list, fit = models, SIMPLIFY = FALSE ) out <- do_call(loo::loo_model_weights, args) names(out) <- names(models) out } #' Add model fit criteria to model objects #' #' @param x An \R object typically of class \code{brmsfit}. #' @param criterion Names of model fit criteria #' to compute. Currently supported are \code{"loo"}, #' \code{"waic"}, \code{"kfold"}, \code{"loo_subsample"}, #' \code{"bayes_R2"} (Bayesian R-squared), #' \code{"loo_R2"} (LOO-adjusted R-squared), and #' \code{"marglik"} (log marginal likelihood). #' @param model_name Optional name of the model. If \code{NULL} #' (the default) the name is taken from the call to \code{x}. #' @param overwrite Logical; Indicates if already stored fit #' indices should be overwritten. Defaults to \code{FALSE}. #' @param file Either \code{NULL} or a character string. In the latter case, the #' fitted model object including the newly added criterion values is saved via #' \code{\link{saveRDS}} in a file named after the string supplied in #' \code{file}. The \code{.rds} extension is added automatically. If \code{x} #' was already stored in a file before, the file name will be reused #' automatically (with a message) unless overwritten by \code{file}. In any #' case, \code{file} only applies if new criteria were actually added via #' \code{add_criterion} or if \code{force_save} was set to \code{TRUE}. #' @param force_save Logical; only relevant if \code{file} is specified and #' ignored otherwise. If \code{TRUE}, the fitted model object will be saved #' regardless of whether new criteria were added via \code{add_criterion}. #' @param ... Further arguments passed to the underlying #' functions computing the model fit criteria. #' #' @return An object of the same class as \code{x}, but #' with model fit criteria added for later usage. #' #' @details Functions \code{add_loo} and \code{add_waic} are aliases of #' \code{add_criterion} with fixed values for the \code{criterion} argument. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ Trt, data = epilepsy) #' # add both LOO and WAIC at once #' fit <- add_criterion(fit, c("loo", "waic")) #' print(fit$criteria$loo) #' print(fit$criteria$waic) #' } #' #' @export add_criterion <- function(x, ...) { UseMethod("add_criterion") } #' @rdname add_criterion #' @export add_criterion.brmsfit <- function(x, criterion, model_name = NULL, overwrite = FALSE, file = NULL, force_save = FALSE, ...) { if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse_combine(substitute(x)) } criterion <- unique(as.character(criterion)) if (any(criterion == "R2")) { # deprecated as of version 2.10.4 warning2("Criterion 'R2' is deprecated. 
Please use 'bayes_R2' instead.") criterion[criterion == "R2"] <- "bayes_R2" } loo_options <- c("loo", "waic", "kfold", "loo_subsample") options <- c(loo_options, "bayes_R2", "loo_R2", "marglik") if (!length(criterion) || !all(criterion %in% options)) { stop2("Argument 'criterion' should be a subset of ", collapse_comma(options)) } auto_save <- FALSE if (!is.null(file)) { file <- paste0(as_one_character(file), ".rds") } else { file <- x$file if (!is.null(file)) auto_save <- TRUE } force_save <- as_one_logical(force_save) overwrite <- as_one_logical(overwrite) if (overwrite) { # recompute all criteria new_criteria <- criterion } else { # only computed criteria not already stored new_criteria <- criterion[ulapply(x$criteria[criterion], is.null)] } args <- list(x, ...) for (fun in intersect(new_criteria, loo_options)) { args$model_names <- model_name x$criteria[[fun]] <- do_call(fun, args) } if ("bayes_R2" %in% new_criteria) { args$summary <- FALSE x$criteria$bayes_R2 <- do_call(bayes_R2, args) } if ("loo_R2" %in% new_criteria) { args$summary <- FALSE x$criteria$loo_R2 <- do_call(loo_R2, args) } if ("marglik" %in% new_criteria) { x$criteria$marglik <- do_call(bridge_sampler, args) } if (!is.null(file) && (force_save || length(new_criteria))) { if (auto_save) { message("Automatically saving the model object in '", file, "'") } x$file <- file saveRDS(x, file = file) } x } # extract a recomputed model fit criterion get_criterion <- function(x, criterion) { stopifnot(is.brmsfit(x)) criterion <- as_one_character(criterion) x$criteria[[criterion]] } # create a hash based on the response of a model hash_response <- function(x, newdata = NULL, resp = NULL, ...) { require_package("digest") stopifnot(is.brmsfit(x)) sdata <- standata( x, newdata = newdata, re_formula = NA, internal = TRUE, check_response = TRUE, only_response = TRUE ) add_funs <- lsp("brms", what = "exports", pattern = "^resp_") regex <- c("Y", sub("^resp_", "", add_funs)) regex <- outer(regex, escape_all(usc(resp)), FUN = paste0) regex <- paste0("(", as.vector(regex), ")", collapse = "|") regex <- paste0("^(", regex, ")(_|$)") out <- sdata[grepl(regex, names(sdata))] out <- as.matrix(as.data.frame(rmNULL(out))) out <- p(out, attr(sdata, "old_order")) # see issue #642 attributes(out) <- NULL digest::sha1(x = out, ...) } # compare the response parts of multiple brmsfit objects # @param models A list of brmsfit objects # @param ... passed to hash_response # @return TRUE if the response parts of all models match and FALSE otherwise match_response <- function(models, ...) { if (length(models) <= 1L) { out <- TRUE } else { yhash <- lapply(models, hash_response, ...) yhash_check <- ulapply(yhash, is_equal, yhash[[1]]) if (all(yhash_check)) { out <- TRUE } else { out <- FALSE } } out } # compare number of observations of multipe models # @param models A list of brmsfit objects # @param ... currently ignored # @return TRUE if the number of rows match match_nobs <- function(models, ...) 
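# Models are considered to match if nobs() of every model equals nobs()
# of the first model in the list.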
{ if (length(models) <= 1L) { out <- TRUE } else { nobs <- lapply(models, nobs) nobs_check <- ulapply(nobs, is_equal, nobs[[1]]) if (all(nobs_check)) { out <- TRUE } else { out <- FALSE } } out } # validate models passed to loo and related methods # @param models list of fitted model objects # @param model_names names specified by the user # @param sub_names names inferred by substitute() validate_models <- function(models, model_names, sub_names) { stopifnot(is.list(models)) model_names <- as.character(model_names) if (!length(model_names)) { model_names <- as.character(sub_names) } if (length(model_names) != length(models)) { stop2("Number of model names is not equal to the number of models.") } names(models) <- model_names for (i in seq_along(models)) { if (!is.brmsfit(models[[i]])) { stop2("Object '", names(models)[i], "' is not of class 'brmsfit'.") } } models } # recommend options if approximate loo fails for some observations # @param moment_match has moment matching already been performed? recommend_loo_options <- function(loo, k_threshold, moment_match = FALSE, model_name = "") { if (isTRUE(nzchar(model_name))) { model_name <- paste0(" in model '", model_name, "'") } else { model_name <- "" } n <- length(loo::pareto_k_ids(loo, threshold = k_threshold)) if (!moment_match && n > 0) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". It is recommended to set 'moment_match = TRUE' in order ", "to perform moment matching for problematic observations. " ) out <- "loo_moment_match" } else if (n > 0 && n <= 10) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". It is recommended to set 'reloo = TRUE' in order to ", "calculate the ELPD without the assumption that these observations " , "are negligible. This will refit the model ", n, " times to compute ", "the ELPDs for the problematic observations directly." ) out <- "reloo" } else if (n > 10) { warning2( "Found ", n, " observations with a pareto_k > ", k_threshold, model_name, ". With this many problematic observations, it may be more ", "appropriate to use 'kfold' with argument 'K = 10' to perform ", "10-fold cross-validation rather than LOO." ) out <- "kfold" } else { out <- "loo" } invisible(out) } # helper function to compute relative efficiences # @param x matrix of posterior draws # @param fit a brmsfit object to extract metadata from # @param allow_na allow NA values in the output? # @return a numeric vector of length NCOL(x) r_eff_helper <- function(x, chain_id, allow_na = TRUE, ...) { out <- loo::relative_eff(x, chain_id = chain_id, ...) if (!allow_na && anyNA(out)) { # avoid error in loo if some but not all r_effs are NA out <- rep(1, length(out)) warning2( "Ignoring relative efficiencies as some were NA. ", "See argument 'r_eff' in ?loo::loo for more details." ) } out } # wrapper around r_eff_helper to compute efficiency # of likelihood draws based on log-likelihood draws r_eff_log_lik <- function(x, ...) { UseMethod("r_eff_log_lik") } #' @export r_eff_log_lik.matrix <- function(x, fit, allow_na = FALSE, ...) { if (is.brmsfit_multiple(fit)) { # due to stacking of chains from multiple models # efficiency computations will likely be incorrect # assume relative efficiency of 1 for now return(rep(1, ncol(x))) } chain_id <- get_chain_id(nrow(x), fit) r_eff_helper(exp(x), chain_id = chain_id, allow_na = allow_na, ...) } #' @export r_eff_log_lik.function <- function(x, fit, draws, allow_na = FALSE, ...) 
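# Pointwise variant: 'x' is a log-likelihood function evaluated per observation.
# Relative efficiencies are computed on the likelihood scale by wrapping 'x'
# in exp() before passing it to loo::relative_eff() via r_eff_helper().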
{ if (is.brmsfit_multiple(fit)) { # due to stacking of chains from multiple models # efficiency computations will likely be incorrect # assume relative efficiency of 1 for now return(rep(1, draws$nobs)) } lik_fun <- function(data_i, draws, ...) { exp(x(data_i, draws, ...)) } chain_id <- get_chain_id(draws$ndraws, fit) r_eff_helper( lik_fun, chain_id = chain_id, draws = draws, allow_na = allow_na, ... ) } # get chain IDs per posterior draw get_chain_id <- function(ndraws, fit) { if (ndraws != ndraws(fit)) { # don't know the chain IDs of a subset of draws chain_id <- rep(1L, ndraws) } else { nchains <- fit$fit@sim$chains chain_id <- rep(seq_len(nchains), each = ndraws / nchains) } chain_id } # print the output of a list of loo objects #' @export print.loolist <- function(x, digits = 1, ...) { model_names <- loo::find_model_names(x$loos) for (i in seq_along(x$loos)) { cat(paste0("Output of model '", model_names[i], "':\n")) print(x$loos[[i]], digits = digits, ...) cat("\n") } if (!is.null(x$diffs)) { cat("Model comparisons:\n") print(x$diffs, digits = digits, ...) } invisible(x) } # ---------- deprecated functions ---------- #' @rdname add_ic #' @export add_loo <- function(x, model_name = NULL, ...) { warning2("'add_loo' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse_combine(substitute(x)) } add_criterion(x, criterion = "loo", model_name = model_name, ...) } #' @rdname add_ic #' @export add_waic <- function(x, model_name = NULL, ...) { warning2("'add_waic' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse_combine(substitute(x)) } add_criterion(x, criterion = "waic", model_name = model_name, ...) } #' Add model fit criteria to model objects #' #' Deprecated aliases of \code{\link{add_criterion}}. #' #' @inheritParams add_criterion #' @param ic,value Names of model fit criteria #' to compute. Currently supported are \code{"loo"}, #' \code{"waic"}, \code{"kfold"}, \code{"R2"} (R-squared), and #' \code{"marglik"} (log marginal likelihood). #' #' @return An object of the same class as \code{x}, but #' with model fit criteria added for later usage. #' Previously computed criterion objects will be overwritten. #' #' @export add_ic <- function(x, ...) { UseMethod("add_ic") } #' @rdname add_ic #' @export add_ic.brmsfit <- function(x, ic = "loo", model_name = NULL, ...) { warning2("'add_ic' is deprecated. Please use 'add_criterion' instead.") if (!is.null(model_name)) { model_name <- as_one_character(model_name) } else { model_name <- deparse_combine(substitute(x)) } add_criterion(x, criterion = ic, model_name = model_name, ...) } #' @rdname add_ic #' @export 'add_ic<-' <- function(x, ..., value) { add_ic(x, ic = value, ...) } #' Compare Information Criteria of Different Models #' #' Compare information criteria of different models fitted #' with \code{\link{waic}} or \code{\link{loo}}. #' Deprecated and will be removed in the future. Please use #' \code{\link{loo_compare}} instead. #' #' @param ... At least two objects returned by #' \code{\link{waic}} or \code{\link{loo}}. #' Alternatively, \code{brmsfit} objects with information #' criteria precomputed via \code{\link{add_ic}} #' may be passed, as well. #' @param x A \code{list} containing the same types of objects as #' can be passed via \code{...}. 
#' @param ic The name of the information criterion to be extracted #' from \code{brmsfit} objects. Ignored if information #' criterion objects are only passed directly. #' #' @return An object of class \code{iclist}. #' #' @details See \code{\link{loo_compare}} for the recommended way #' of comparing models with the \pkg{loo} package. #' #' @seealso #' \code{\link{loo}}, #' \code{\link{loo_compare}} #' \code{\link{add_criterion}} #' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' waic1 <- waic(fit1) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' waic2 <- waic(fit2) #' #' # compare both models #' compare_ic(waic1, waic2) #' } #' #' @export compare_ic <- function(..., x = NULL, ic = c("loo", "waic", "kfold")) { # will be removed in brms 3.0 warning2( "'compare_ic' is deprecated and will be removed ", "in the future. Please use 'loo_compare' instead." ) ic <- match.arg(ic) if (!(is.null(x) || is.list(x))) { stop2("Argument 'x' should be a list.") } x$ic_diffs__ <- NULL x <- c(list(...), x) for (i in seq_along(x)) { # extract precomputed values from brmsfit objects if (is.brmsfit(x[[i]]) && !is.null(x[[i]][[ic]])) { x[[i]] <- x[[i]][[ic]] } } if (!all(sapply(x, inherits, "loo"))) { stop2("All inputs should have class 'loo' ", "or contain precomputed 'loo' objects.") } if (length(x) < 2L) { stop2("Expecting at least two objects.") } ics <- unname(sapply(x, function(y) rownames(y$estimates)[3])) if (!all(ics %in% ics[1])) { stop2("All inputs should be from the same criterion.") } yhash <- lapply(x, attr, which = "yhash") yhash_check <- ulapply(yhash, is_equal, yhash[[1]]) if (!all(yhash_check)) { warning2( "Model comparisons are likely invalid as the response ", "values of at least two models do not match." ) } names(x) <- loo::find_model_names(x) n_models <- length(x) ic_diffs <- matrix(0, nrow = n_models * (n_models - 1) / 2, ncol = 2) rnames <- rep("", nrow(ic_diffs)) # pairwise comparision to get differences in ICs and their SEs n <- 1 for (i in seq_len(n_models - 1)) { for (j in (i + 1):n_models) { tmp <- SW(loo::compare(x[[j]], x[[i]])) ic_diffs[n, ] <- c(-2 * tmp[["elpd_diff"]], 2 * tmp[["se"]]) rnames[n] <- paste(names(x)[i], "-", names(x)[j]) n <- n + 1 } } rownames(ic_diffs) <- rnames colnames(ic_diffs) <- c(toupper(ics[1]), "SE") x$ic_diffs__ <- ic_diffs class(x) <- "iclist" x } # print the output of LOO and WAIC with multiple models # deprecated as of brms > 2.5.0 and will be removed in brms 3.0 #' @export print.iclist <- function(x, digits = 2, ...) { m <- x m$ic_diffs__ <- NULL if (length(m)) { ic <- rownames(m[[1]]$estimates)[3] mat <- matrix(0, nrow = length(m), ncol = 2) dimnames(mat) <- list(names(m), c(toupper(ic), "SE")) for (i in seq_along(m)) { mat[i, ] <- m[[i]]$estimates[3, ] } } else { mat <- ic <- NULL } ic_diffs <- x$ic_diffs__ if (is.matrix(attr(x, "compare"))) { # deprecated as of brms 1.4.0 ic_diffs <- attr(x, "compare") } if (is.matrix(ic_diffs)) { # models were compared using the compare_ic function mat <- rbind(mat, ic_diffs) } print(round(mat, digits = digits), na.print = "") invisible(x) } brms/R/exclude_terms.R0000644000175000017500000000300513701270367014542 0ustar nileshnilesh# exclude predictor terms from being evaluated exclude_terms <- function(x, ...) { UseMethod("exclude_terms") } #' @export exclude_terms.brmsfit <- function(x, ...) 
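# The brmsfit method only updates the stored model formula; the actual work
# happens in the (mv)brmsformula methods below.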
{ x$formula <- exclude_terms(x$formula, ...) x } #' @export exclude_terms.mvbrmsformula <- function(x, ...) { for (i in seq_along(x$forms)) { x$forms[[i]] <- exclude_terms(x$forms[[i]], ...) } x } #' @export exclude_terms.brmsformula <- function( x, excl_term_types = NULL, incl_autocor = TRUE, smooths_only = FALSE, offset = TRUE, ... ) { excl_term_types <- as.character(excl_term_types) # TODO: deprecate the three arguments below? incl_autocor <- as_one_logical(incl_autocor) smooths_only <- as_one_logical(smooths_only) offset <- as_one_logical(offset) if (!incl_autocor) { c(excl_term_types) <- "ac" } if (!offset) { c(excl_term_types) <- "offset" } if (smooths_only) { excl_term_types <- setdiff(all_term_types(), "sm") } if (!length(excl_term_types)) { return(x) } invalid_types <- setdiff(excl_term_types, all_term_types()) if (length(invalid_types)) { stop2("The following term types are invalid: ", collapse_comma(invalid_types)) } attr(x$formula, "excl_term_types") <- excl_term_types for (i in seq_along(x$pforms)) { attr(x$pforms[[i]], "excl_term_types") <- excl_term_types } x } # extract names of excluded term types excluded_term_types <- function(x) { as.character(attr(x, "excl_term_types", TRUE)) } brms/R/prior_draws.R0000644000175000017500000001223614111751666014242 0ustar nileshnilesh#' Extract Prior Draws #' #' Extract prior draws of specified parameters #' #' @aliases prior_draws.brmsfit prior_samples #' #' @param x An \code{R} object typically of class \code{brmsfit}. #' @inheritParams as.data.frame.brmsfit #' @param ... Arguments passed to individual methods (if applicable). #' #' @details To make use of this function, the model must contain draws of #' prior distributions. This can be ensured by setting \code{sample_prior = #' TRUE} in function \code{brm}. Priors of certain parameters cannot be saved #' for technical reasons. For instance, this is the case for the #' population-level intercept, which is only computed after fitting the model #' by default. If you want to treat the intercept as part of all the other #' regression coefficients, so that sampling from its prior becomes possible, #' use \code{... ~ 0 + Intercept + ...} in the formulas. #' #' @return A \code{data.frame} containing the prior draws. #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative", #' prior = set_prior("normal(0,2)", class = "b"), #' sample_prior = TRUE) #' #' # extract all prior draws #' draws1 <- prior_draws(fit) #' head(draws1) #' #' # extract prior draws for the coefficient of 'treat' #' draws2 <- prior_draws(fit, "b_treat") #' head(draws2) #' } #' #' @export prior_draws.brmsfit <- function(x, variable = NULL, pars = NULL, ...) 
{ variable <- use_alias(variable, pars) if (!is.null(variable)) { variable <- as.character(variable) } all_names <- variables(x) prior_names <- unique(all_names[grepl("^prior_", all_names)]) if (!length(prior_names)) { return(data.frame(NULL)) } draws <- as.data.frame(x, variable = prior_names) names(draws) <- sub("^prior_", "", prior_names) if (is.null(variable)) { return(draws) } # get prior draws for a single variable .prior_draws <- function(variable) { matches <- paste0("^", escape_all(names(draws))) matches <- lapply(matches, regexpr, text = variable) matches <- ulapply(matches, attr, which = "match.length") if (max(matches) == -1 || ignore_prior(x, variable)) { out <- NULL } else { take <- match(max(matches), matches) # order draws randomly to avoid artificial dependencies # between parameters using the same prior draws draws <- list(draws[sample(ndraws(x)), take]) out <- structure(draws, names = variable) } return(out) } draws <- rmNULL(lapply(variable, .prior_draws)) draws <- data.frame(draws, check.names = FALSE) draws } #' @rdname prior_draws.brmsfit #' @export prior_draws <- function(x, ...) { UseMethod("prior_draws") } #' @export prior_draws.default <- function(x, variable = NULL, pars = NULL, regex = FALSE, fixed = FALSE, ...) { call <- match.call() if ("pars" %in% names(call)) { variable <- use_alias(variable, pars) regex <- !as_one_logical(fixed) } if (is.null(variable)) { variable <- "^prior_" regex <- TRUE } else { variable <- as.character(variable) regex <- as_one_logical(regex) if (regex) { hat <- substr(variable, 1, 1) == "^" variable <- ifelse(hat, substr(variable, 2, nchar(variable)), variable) variable <- paste0("^prior_", variable) } else { variable <- paste0("prior_", variable) } } x <- as_draws_df(as.data.frame(x)) if (!regex) { # missing variables will leads to an error in posterior variable <- intersect(variable, variables(x)) if (!length(variable)) { return(data.frame(NULL)) } } x <- subset_draws(x, variable = variable, regex = regex, ...) unclass_draws(x) } #' @rdname prior_draws.brmsfit #' @export prior_samples <- function(x, ...) { warning2("'prior_samples' is deprecated. Please use 'prior_draws' instead.") UseMethod("prior_draws") } # ignore priors of certain parameters from whom we cannot obtain prior draws # currently applies only to overall intercepts of centered design matrices # fixes issue #696 # @param x a brmsfit object # @param variable name of a single variable # @return TRUE (if the prior should be ignored) or FALSE ignore_prior <- function(x, variable) { stopifnot(is.brmsfit(x)) variable <- as_one_character(variable) out <- FALSE if (grepl("^b_.*Intercept($|\\[)", variable)) { # cannot sample from intercepts if 'center' was TRUE intercept_priors <- subset2(x$prior, class = "Intercept") if (NROW(intercept_priors)) { # prefixes of the model intercepts p_intercepts <- usc(combine_prefix(intercept_priors)) # prefix of the parameter under question p_par <- sub("^b", "", variable) p_par <- sub("_Intercept($|\\[)", "", p_par) out <- p_par %in% p_intercepts if (out) { warning2( "Sampling from the prior of an overall intercept is not ", "possible by default. See the documentation of the ", "'sample_prior' argument in help('brm')." ) } } } out } brms/R/launch_shinystan.R0000644000175000017500000000355614111751666015266 0ustar nileshnilesh#' Interface to \pkg{shinystan} #' #' Provide an interface to \pkg{shinystan} for models fitted with \pkg{brms} #' #' @aliases launch_shinystan #' #' @param object A fitted model object typically of class \code{brmsfit}. 
#' @param rstudio Only relevant for RStudio users. #' The default (\code{rstudio=FALSE}) is to launch the app #' in the default web browser rather than RStudio's pop-up Viewer. #' Users can change the default to \code{TRUE} #' by setting the global option \cr \code{options(shinystan.rstudio = TRUE)}. #' @param ... Optional arguments to pass to \code{\link[shiny:runApp]{runApp}} #' #' @return An S4 shinystan object #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "gaussian") #' launch_shinystan(fit) #' } #' #' @seealso \code{\link[shinystan:launch_shinystan]{launch_shinystan}} #' #' @method launch_shinystan brmsfit #' @importFrom shinystan launch_shinystan #' @export launch_shinystan #' @export launch_shinystan.brmsfit <- function( object, rstudio = getOption("shinystan.rstudio"), ... ) { contains_draws(object) if (object$algorithm != "sampling") { return(shinystan::launch_shinystan(object$fit, rstudio = rstudio, ...)) } draws <- as.array(object) sampler_params <- rstan::get_sampler_params(object$fit, inc_warmup = FALSE) control <- object$fit@stan_args[[1]]$control if (is.null(control)) { max_td <- 10 } else { max_td <- control$max_treedepth if (is.null(max_td)) { max_td <- 10 } } sso <- shinystan::as.shinystan( X = draws, model_name = object$fit@model_name, warmup = 0, sampler_params = sampler_params, max_treedepth = max_td, algorithm = "NUTS" ) shinystan::launch_shinystan(sso, rstudio = rstudio, ...) } brms/R/predictor.R0000644000175000017500000004217414111751666013706 0ustar nileshnilesh# compute predictor terms predictor <- function(prep, ...) { UseMethod("predictor") } # compute linear/additive predictor terms # @param prep a list generated by prepare_predictions containing # all required data and posterior draws # @param i An optional vector indicating the observation(s) # for which to compute eta. If NULL, eta is computed # for all all observations at once. # @param fprep Optional full brmsprep object of the model. # Currently only needed in non-linear models or for # predicting new data in models with autocorrelation. # @return Usually an S x N matrix where S is the number of draws # and N is the number of observations or length of i if specified. #' @export predictor.bprepl <- function(prep, i = NULL, fprep = NULL, ...) { nobs <- ifelse(!is.null(i), length(i), prep$nobs) eta <- matrix(0, nrow = prep$ndraws, ncol = nobs) + predictor_fe(prep, i) + predictor_re(prep, i) + predictor_sp(prep, i) + predictor_sm(prep, i) + predictor_gp(prep, i) + predictor_offset(prep, i, nobs) # some autocorrelation structures depend on eta eta <- predictor_ac(eta, prep, i, fprep = fprep) # intentionally last as it may return 3D arrays eta <- predictor_cs(eta, prep, i) unname(eta) } # compute non-linear predictor terms # @param prep a list generated by prepare_predictions containing # all required data and posterior draws # @param i An optional vector indicating the observation(s) # for which to compute eta. If NULL, eta is computed # for all all observations at once. # @param ... further arguments passed to predictor.bprepl # @return Usually an S x N matrix where S is the number of draws # and N is the number of observations or length of i if specified. #' @export predictor.bprepnl <- function(prep, i = NULL, fprep = NULL, ...) { stopifnot(!is.null(fprep)) nlpars <- prep$used_nlpars covars <- names(prep$C) args <- named_list(c(nlpars, covars)) for (nlp in nlpars) { args[[nlp]] <- get_nlpar(fprep, nlpar = nlp, i = i, ...) 
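    # get_nlpar() evaluates the linear predictor of this non-linear
    # parameter and usually returns an S x N matrix (draws x observations)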
} for (cov in covars) { args[[cov]] <- p(prep$C[[cov]], i, row = FALSE) } dim_eta <- dim(rmNULL(args)[[1]]) # evaluate non-linear predictor if (!prep$loop) { # cannot reasonably vectorize over posterior draws # when 'nlform' must be evaluated jointly across observations # and hence 'loop' had been set to FALSE for (i in seq_along(args)) { args[[i]] <- split(args[[i]], row(args[[i]])) } .fun <- function(...) eval(prep$nlform, list(...)) eta <- try( t(do_call(mapply, c(list(FUN = .fun, SIMPLIFY = "array"), args))), silent = TRUE ) } else { # assumes fully vectorized version of 'nlform' eta <- try(eval(prep$nlform, args), silent = TRUE) } if (is(eta, "try-error")) { if (grepl("could not find function", eta)) { eta <- rename(eta, "Error in eval(expr, envir, enclos) : ", "") vectorize <- str_if(prep$loop, ", vectorize = TRUE") message( eta, " Most likely this is because you used a Stan ", "function in the non-linear model formula that ", "is not defined in R. If this is a user-defined function, ", "please run 'expose_functions(.", vectorize, ")' ", "on your fitted model and try again." ) } else { eta <- rename(eta, "^Error :", "", fixed = FALSE) stop2(eta) } } dim(eta) <- dim_eta unname(eta) } # compute eta for overall effects predictor_fe <- function(prep, i) { fe <- prep[["fe"]] if (!isTRUE(ncol(fe[["X"]]) > 0)) { return(0) } eta <- try(.predictor_fe(X = p(fe[["X"]], i), b = fe[["b"]])) if (is(eta, "try-error")) { stop2( "Something went wrong (see the error message above). ", "Perhaps you transformed numeric variables ", "to factors or vice versa within the model formula? ", "If yes, please convert your variables beforehand. ", "Or did you set a predictor variable to NA?" ) } eta } # workhorse function of predictor_fe # @param X fixed effects design matrix # @param b draws of fixed effects coeffients .predictor_fe <- function(X, b) { stopifnot(is.matrix(X)) stopifnot(is.matrix(b)) tcrossprod(b, X) } # compute eta for varying effects predictor_re <- function(prep, i) { eta <- 0 re <- prep[["re"]] group <- names(re[["r"]]) for (g in group) { eta_g <- try(.predictor_re(Z = p(re[["Z"]][[g]], i), r = re[["r"]][[g]])) if (is(eta_g, "try-error")) { stop2( "Something went wrong (see the error message above). ", "Perhaps you transformed numeric variables ", "to factors or vice versa within the model formula? ", "If yes, please convert your variables beforehand. ", "Or did you use a grouping factor also for a different purpose? ", "If yes, please make sure that its factor levels are correct ", "also in the new data you may have provided." 
) } eta <- eta + eta_g } eta } # workhorse function of predictor_re # @param Z sparse random effects design matrix # @param r random effects draws # @return linear predictor for random effects .predictor_re <- function(Z, r) { Matrix::as.matrix(Matrix::tcrossprod(r, Z)) } # compute eta for special effects terms predictor_sp <- function(prep, i) { eta <- 0 sp <- prep[["sp"]] if (!length(sp)) { return(eta) } eval_list <- list() for (j in seq_along(sp[["simo"]])) { eval_list[[paste0("Xmo_", j)]] <- p(sp[["Xmo"]][[j]], i) eval_list[[paste0("simo_", j)]] <- sp[["simo"]][[j]] } for (j in seq_along(sp[["Xme"]])) { eval_list[[paste0("Xme_", j)]] <- p(sp[["Xme"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["Yl"]])) { eval_list[[names(sp[["Yl"]])[j]]] <- p(sp[["Yl"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["idxl"]])) { eval_list[[names(sp[["idxl"]])[j]]] <- p(sp[["idxl"]][[j]], i, row = FALSE) } for (j in seq_along(sp[["Csp"]])) { eval_list[[paste0("Csp_", j)]] <- p(sp[["Csp"]][[j]], i, row = FALSE) } re <- prep[["re"]] coef <- colnames(sp[["bsp"]]) for (j in seq_along(coef)) { # prepare special group-level effects rsp <- named_list(names(re[["rsp"]][[coef[j]]])) for (g in names(rsp)) { rsp[[g]] <- .predictor_re( Z = p(re[["Zsp"]][[g]], i), r = re[["rsp"]][[coef[j]]][[g]] ) } eta <- eta + .predictor_sp( eval_list, call = sp[["calls"]][[j]], b = sp[["bsp"]][, j], r = Reduce("+", rsp) ) } eta } # workhorse function of predictor_sp # @param call expression for evaluation of special effects # @param eval_list list containing variables for 'call' # @param b special effects coefficients draws # @param r matrix with special effects group-level draws .predictor_sp <- function(eval_list, call, b, r = NULL) { b <- as.vector(b) if (is.null(r)) r <- 0 (b + r) * eval(call, eval_list) } # R implementation of the user defined Stan function 'mo' # @param simplex posterior draws of a simplex parameter vector # @param X variable modeled as monotonic .mo <- function(simplex, X) { stopifnot(is.matrix(simplex), is.atomic(X)) D <- NCOL(simplex) simplex <- cbind(0, simplex) for (i in seq_cols(simplex)[-1]) { # compute the cumulative representation of the simplex simplex[, i] <- simplex[, i] + simplex[, i - 1] } D * simplex[, X + 1] } # compute eta for smooth terms predictor_sm <- function(prep, i) { eta <- 0 if (!length(prep[["sm"]])) { return(eta) } fe <- prep[["sm"]]$fe if (length(fe)) { eta <- eta + .predictor_fe(X = p(fe$Xs, i), b = fe$bs) } re <- prep[["sm"]]$re for (k in seq_along(re)) { for (j in seq_along(re[[k]]$s)) { Zs <- p(re[[k]]$Zs[[j]], i) s <- re[[k]]$s[[j]] eta <- eta + .predictor_fe(X = Zs, b = s) } } eta } # compute eta for gaussian processes predictor_gp <- function(prep, i) { if (!length(prep[["gp"]])) { return(0) } if (!is.null(i)) { stop2("Pointwise evaluation is not supported for Gaussian processes.") } eta <- matrix(0, nrow = prep$ndraws, ncol = prep$nobs) for (k in seq_along(prep[["gp"]])) { gp <- prep[["gp"]][[k]] if (isTRUE(attr(gp, "byfac"))) { # categorical 'by' variable for (j in seq_along(gp)) { if (length(gp[[j]][["Igp"]])) { eta[, gp[[j]][["Igp"]]] <- .predictor_gp(gp[[j]]) } } } else { eta <- eta + .predictor_gp(gp) } } eta } # workhorse function of predictor_gp # @param gp a list returned by '.prepare_predictions_gp' # @return A S x N matrix to be added to the linear predictor # @note does not work with pointwise evaluation .predictor_gp <- function(gp) { if (is.null(gp[["slambda"]])) { # predictions for exact GPs ndraws <- length(gp[["sdgp"]]) eta <- as.list(rep(NA, 
ndraws)) if (!is.null(gp[["x_new"]])) { for (i in seq_along(eta)) { eta[[i]] <- with(gp, .predictor_gp_new( x_new = x_new, yL = yL[i, ], x = x, sdgp = sdgp[i], lscale = lscale[i, ], nug = nug )) } } else { for (i in seq_along(eta)) { eta[[i]] <- with(gp, .predictor_gp_old( x = x, sdgp = sdgp[i], lscale = lscale[i, ], zgp = zgp[i, ], nug = nug )) } } eta <- do_call(rbind, eta) } else { # predictions for approximate GPs eta <- with(gp, .predictor_gpa( x = x, sdgp = sdgp, lscale = lscale, zgp = zgp, slambda = slambda )) } if (!is.null(gp[["Jgp"]])) { eta <- eta[, gp[["Jgp"]], drop = FALSE] } if (!is.null(gp[["Cgp"]])) { eta <- eta * data2draws(gp[["Cgp"]], dim = dim(eta)) } eta } # make exact GP predictions for old data points # vectorized over posterior draws # @param x old predictor values # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param zgp draws of parameter vector zgp # @param nug very small positive value to ensure numerical stability .predictor_gp_old <- function(x, sdgp, lscale, zgp, nug) { Sigma <- cov_exp_quad(x, sdgp = sdgp, lscale = lscale) lx <- nrow(x) Sigma <- Sigma + diag(rep(nug, lx), lx, lx) L_Sigma <- try_nug(t(chol(Sigma)), nug = nug) as.numeric(L_Sigma %*% zgp) } # make exact GP predictions for new data points # vectorized over posterior draws # @param x_new new predictor values # @param yL linear predictor of the old data # @param x old predictor values # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param nug very small positive value to ensure numerical stability .predictor_gp_new <- function(x_new, yL, x, sdgp, lscale, nug) { Sigma <- cov_exp_quad(x, sdgp = sdgp, lscale = lscale) lx <- nrow(x) lx_new <- nrow(x_new) Sigma <- Sigma + diag(rep(nug, lx), lx, lx) L_Sigma <- try_nug(t(chol(Sigma)), nug = nug) L_Sigma_inverse <- solve(L_Sigma) K_div_yL <- L_Sigma_inverse %*% yL K_div_yL <- t(t(K_div_yL) %*% L_Sigma_inverse) k_x_x_new <- cov_exp_quad(x, x_new, sdgp = sdgp, lscale = lscale) mu_yL_new <- as.numeric(t(k_x_x_new) %*% K_div_yL) v_new <- L_Sigma_inverse %*% k_x_x_new cov_yL_new <- cov_exp_quad(x_new, sdgp = sdgp, lscale = lscale) - t(v_new) %*% v_new + diag(rep(nug, lx_new), lx_new, lx_new) yL_new <- try_nug( rmulti_normal(1, mu = mu_yL_new, Sigma = cov_yL_new), nug = nug ) return(yL_new) } # make predictions for approximate GPs # vectorized over posterior draws # @param x matrix of evaluated eigenfunctions of the cov matrix # @param sdgp sample of parameter sdgp # @param lscale sample of parameter lscale # @param zgp draws of parameter vector zgp # @param slambda vector of eigenvalues of the cov matrix # @note no need to differentiate between old and new data points .predictor_gpa <- function(x, sdgp, lscale, zgp, slambda) { spd <- sqrt(spd_cov_exp_quad(slambda, sdgp, lscale)) (spd * zgp) %*% t(x) } # compute eta for category specific effects # @param predictor matrix of other additive terms # @return 3D predictor array in the presence of 'cs' effects # otherwise return 'eta' unchanged predictor_cs <- function(eta, prep, i) { cs <- prep[["cs"]] re <- prep[["re"]] if (!length(cs[["bcs"]]) && !length(re[["rcs"]])) { return(eta) } nthres <- cs[["nthres"]] rcs <- NULL if (!is.null(re[["rcs"]])) { groups <- names(re[["rcs"]]) rcs <- vector("list", nthres) for (k in seq_along(rcs)) { rcs[[k]] <- named_list(groups) for (g in groups) { rcs[[k]][[g]] <- .predictor_re( Z = p(re[["Zcs"]][[g]], i), r = re[["rcs"]][[g]][[k]] ) } rcs[[k]] <- Reduce("+", rcs[[k]]) } } .predictor_cs( eta, X = 
p(cs[["Xcs"]], i), b = cs[["bcs"]], nthres = nthres, r = rcs ) } # workhorse function of predictor_cs # @param X category specific design matrix # @param b category specific effects draws # @param nthres number of thresholds # @param eta linear predictor matrix # @param r list of draws of cs group-level effects # @return 3D predictor array including category specific effects .predictor_cs <- function(eta, X, b, nthres, r = NULL) { stopifnot(is.null(X) && is.null(b) || is.matrix(X) && is.matrix(b)) nthres <- max(nthres) eta <- predictor_expand(eta, nthres) if (!is.null(X)) { I <- seq(1, (nthres) * ncol(X), nthres) - 1 X <- t(X) } for (k in seq_len(nthres)) { if (!is.null(X)) { eta[, , k] <- eta[, , k] + b[, I + k, drop = FALSE] %*% X } if (!is.null(r[[k]])) { eta[, , k] <- eta[, , k] + r[[k]] } } eta } # expand dimension of the predictor matrix to a 3D array predictor_expand <- function(eta, nthres) { if (length(dim(eta)) == 2L) { eta <- array(eta, dim = c(dim(eta), nthres)) } eta } predictor_offset <- function(prep, i, nobs) { if (is.null(prep$offset)) { return(0) } eta <- rep(p(prep$offset, i), prep$ndraws) matrix(eta, ncol = nobs, byrow = TRUE) } # compute eta for autocorrelation structures # @note eta has to be passed to this function in # order for ARMA structures to work correctly predictor_ac <- function(eta, prep, i, fprep = NULL) { if (has_ac_class(prep$ac$acef, "arma")) { if (!is.null(prep$ac$err)) { # ARMA correlations via latent residuals eta <- eta + p(prep$ac$err, i, row = FALSE) } else { # ARMA correlations via explicit natural residuals if (!is.null(i)) { stop2("Pointwise evaluation is not possible for ARMA models.") } eta <- .predictor_arma( eta, ar = prep$ac$ar, ma = prep$ac$ma, Y = prep$ac$Y, J_lag = prep$ac$J_lag, fprep = fprep ) } } if (has_ac_class(prep$ac$acef, "car")) { eta <- eta + .predictor_re(Z = p(prep$ac$Zcar, i), r = prep$ac$rcar) } eta } # add ARMA effects to a predictor matrix # @param eta linear predictor matrix # @param ar optional autoregressive draws # @param ma optional moving average draws # @param Y vector of response values # @param J_lag autocorrelation lag for each observation # @return linear predictor matrix updated by ARMA effects .predictor_arma <- function(eta, ar = NULL, ma = NULL, Y = NULL, J_lag = NULL, fprep = NULL) { if (is.null(ar) && is.null(ma)) { return(eta) } if (anyNA(Y)) { # predicting Y will be necessary at some point stopifnot(is.brmsprep(fprep) || is.mvbrmsprep(fprep)) pp_fun <- paste0("posterior_predict_", fprep$family$fun) pp_fun <- get(pp_fun, asNamespace("brms")) } S <- nrow(eta) N <- length(Y) max_lag <- max(J_lag, 1) Kar <- ifelse(is.null(ar), 0, ncol(ar)) Kma <- ifelse(is.null(ma), 0, ncol(ma)) # relevant if time-series are shorter than the ARMA orders take_ar <- seq_len(min(Kar, max_lag)) take_ma <- seq_len(min(Kma, max_lag)) ar <- ar[, take_ar, drop = FALSE] ma <- ma[, take_ma, drop = FALSE] Err <- array(0, dim = c(S, max_lag, max_lag + 1)) err <- zero_mat <- matrix(0, nrow = S, ncol = max_lag) zero_vec <- rep(0, S) for (n in seq_len(N)) { if (Kma) { eta[, n] <- eta[, n] + rowSums(ma * Err[, take_ma, max_lag]) } eta_before_ar <- eta[, n] if (Kar) { eta[, n] <- eta[, n] + rowSums(ar * Err[, take_ar, max_lag]) } # AR terms need to be included in the predictions of y if missing # the prediction code thus differs from the structure of the Stan code y <- Y[n] if (is.na(y)) { # y was not observed and has to be predicted fprep$dpars$mu <- eta y <- pp_fun(n, fprep) } # errors in AR models need to be computed before adding AR 
terms err[, max_lag] <- y - eta_before_ar if (J_lag[n] > 0) { # store residuals of former observations I <- seq_len(J_lag[n]) Err[, I, max_lag + 1] <- err[, max_lag + 1 - I] } # keep the size of 'err' and 'Err' as small as possible Err <- abind(Err[, , -1, drop = FALSE], zero_mat) err <- cbind(err[, -1, drop = FALSE], zero_vec) } eta } brms/R/ggplot-themes.R0000644000175000017500000000676614111751666014501 0ustar nileshnilesh#' (Deprecated) Black Theme for \pkg{ggplot2} Graphics #' #' A black theme for ggplot graphics inspired by a blog post of Jon Lefcheck #' (\url{https://jonlefcheck.net/2013/03/11/black-theme-for-ggplot2-2/}). #' #' @param base_size base font size #' @param base_family base font family #' #' @return A \code{theme} object used in \pkg{ggplot2} graphics. #' #' @details When using \code{theme_black} in plots powered by the #' \pkg{bayesplot} package such as \code{pp_check} or \code{stanplot}, #' I recommend using the \code{"viridisC"} color scheme (see examples). #' #' @examples #' \dontrun{ #' # change default ggplot theme #' ggplot2::theme_set(theme_black()) #' #' # change default bayesplot color scheme #' bayesplot::color_scheme_set("viridisC") #' #' # fit a simple model #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), chains = 2) #' summary(fit) #' #' # create various plots #' plot(marginal_effects(fit), ask = FALSE) #' pp_check(fit) #' mcmc_plot(fit, type = "hex", variable = c("b_Intercept", "b_Trt1")) #' } #' #' @export theme_black = function(base_size = 12, base_family = "") { warning2("'theme_black' is deprecated. Please use the 'ggdark' package ", "for dark ggplot themes.") theme_grey(base_size = base_size, base_family = base_family) %+replace% theme( # axis options axis.line = element_blank(), axis.text.x = element_text( size = base_size * 0.8, color = "white", lineheight = 0.9 ), axis.text.y = element_text( size = base_size * 0.8, color = "white", lineheight = 0.9 ), axis.ticks = element_line(color = "white", size = 0.2), axis.title.x = element_text( size = base_size, color = "white", margin = margin(10, 0, 0, 0) ), axis.title.y = element_text( size = base_size, color = "white", angle = 90, margin = margin(0, 10, 0, 0) ), axis.ticks.length = unit(0.3, "lines"), # legend options legend.background = element_rect(color = NA, fill = "black"), legend.key = element_rect(color = "white", fill = "black"), legend.key.size = unit(1.2, "lines"), legend.key.height = NULL, legend.key.width = NULL, legend.text = element_text(size = base_size * 0.8, color = "white"), legend.title = element_text( size = base_size * 0.8, face = "bold", hjust = 0, color = "white" ), legend.position = "right", legend.text.align = NULL, legend.title.align = NULL, legend.direction = "vertical", legend.box = NULL, # panel options panel.background = element_rect(fill = "black", color = NA), panel.border = element_rect(fill = NA, color = "white"), panel.grid.major = element_line(color = "grey35"), panel.grid.minor = element_line(color = "grey20"), panel.spacing = unit(0.5, "lines"), # facetting options strip.background = element_rect(fill = "grey30", color = "grey10"), strip.text.x = element_text( size = base_size * 0.8, color = "white", margin = margin(3, 0, 4, 0) ), strip.text.y = element_text( size = base_size * 0.8, color = "white", angle = -90 ), # plot options plot.background = element_rect(color = "black", fill = "black"), plot.title = element_text(size = base_size * 1.2, color = "white"), plot.margin = unit(rep(1, 4), "lines") ) } 
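# Illustrative aside (not part of the brms sources): the deprecation warning
# above points users to the 'ggdark' package. A minimal sketch of that
# replacement workflow is shown below; it assumes 'ggdark' is installed and
# that it exports dark_theme_gray() (see the ggdark documentation for the
# exact theme names available).
#
#   library(ggplot2)
#   ggplot2::theme_set(ggdark::dark_theme_gray())
#   # as recommended in the details of 'theme_black', a viridis-based
#   # bayesplot color scheme works well on dark backgrounds
#   bayesplot::color_scheme_set("viridisC")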
brms/R/summary.R0000644000175000017500000004675714111751666013423 0ustar nileshnilesh#' Create a summary of a fitted model represented by a \code{brmsfit} object #' #' @param object An object of class \code{brmsfit}. #' @param priors Logical; Indicating if priors should be included #' in the summary. Default is \code{FALSE}. #' @param prob A value between 0 and 1 indicating the desired probability #' to be covered by the uncertainty intervals. The default is 0.95. #' @param mc_se Logical; Indicating if the uncertainty in \code{Estimate} #' caused by the MCMC sampling should be shown in the summary. Defaults to #' \code{FALSE}. #' @param ... Other potential arguments #' @inheritParams posterior_summary #' #' @details The convergence diagnostics \code{Rhat}, \code{Bulk_ESS}, and #' \code{Tail_ESS} are described in detail in Vehtari et al. (2020). #' #' @references #' Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, and #' Paul-Christian Bürkner (2020). Rank-normalization, folding, and #' localization: An improved R-hat for assessing convergence of #' MCMC. *Bayesian Analysis*. 1–28. dpi:10.1214/20-BA1221 #' #' @method summary brmsfit #' @importMethodsFrom rstan summary #' @importFrom posterior subset_draws summarize_draws #' @export summary.brmsfit <- function(object, priors = FALSE, prob = 0.95, robust = FALSE, mc_se = FALSE, ...) { priors <- as_one_logical(priors) probs <- validate_ci_bounds(prob) robust <- as_one_logical(robust) mc_se <- as_one_logical(mc_se) object <- restructure(object) bterms <- brmsterms(object$formula) out <- list( formula = object$formula, data_name = get_data_name(object$data), group = unique(object$ranef$group), nobs = nobs(object), ngrps = ngrps(object), autocor = object$autocor, prior = empty_prior(), algorithm = algorithm(object) ) class(out) <- "brmssummary" if (!length(object$fit@sim)) { # the model does not contain posterior draws return(out) } out$chains <- nchains(object) out$iter <- niterations(object) + nwarmup(object) out$warmup <- nwarmup(object) out$thin <- nthin(object) stan_args <- object$fit@stan_args[[1]] out$sampler <- paste0(stan_args$method, "(", stan_args$algorithm, ")") if (priors) { out$prior <- prior_summary(object, all = FALSE) } # compute a summary for given set of parameters # TODO: align names with summary outputs of other methods and packages .summary <- function(draws, variables, probs, robust) { # quantiles with appropriate names to retain backwards compatibility .quantile <- function(x, ...) { qs <- posterior::quantile2(x, probs = probs, ...) 
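      # with the default prob = 0.95, probs = c(0.025, 0.975), so the
      # bounds below are labeled 'l-95% CI' and 'u-95% CI'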
prob <- probs[2] - probs[1] names(qs) <- paste0(c("l-", "u-"), prob * 100, "% CI") return(qs) } draws <- subset_draws(draws, variable = variables) measures <- list() if (robust) { measures$Estimate <- median if (mc_se) { measures$MCSE <- posterior::mcse_median } measures$Est.Error <- mad } else { measures$Estimate <- mean if (mc_se) { measures$MCSE <- posterior::mcse_mean } measures$Est.Error <- sd } c(measures) <- list( quantiles = .quantile, Rhat = posterior::rhat, Bulk_ESS = posterior::ess_bulk, Tail_ESS = posterior::ess_tail ) out <- do.call(summarize_draws, c(list(draws), measures)) out <- as.data.frame(out) rownames(out) <- out$variable out$variable <- NULL return(out) } variables <- variables(object) excl_regex <- "^(r|s|z|zs|zgp|Xme|L|Lrescor|prior|lp)(_|$)" variables <- variables[!grepl(excl_regex, variables)] draws <- as_draws_array(object) full_summary <- .summary(draws, variables, probs, robust) if (algorithm(object) == "sampling") { Rhats <- full_summary[, "Rhat"] if (any(Rhats > 1.05, na.rm = TRUE)) { warning2( "Parts of the model have not converged (some Rhats are > 1.05). ", "Be careful when analysing the results! We recommend running ", "more iterations and/or setting stronger priors." ) } div_trans <- sum(nuts_params(object, pars = "divergent__")$Value) adapt_delta <- control_params(object)$adapt_delta if (div_trans > 0) { warning2( "There were ", div_trans, " divergent transitions after warmup. ", "Increasing adapt_delta above ", adapt_delta, " may help. See ", "http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup" ) } } # summary of population-level effects fe_pars <- variables[grepl(fixef_pars(), variables)] out$fixed <- full_summary[fe_pars, , drop = FALSE] rownames(out$fixed) <- gsub(fixef_pars(), "", fe_pars) # summary of family specific parameters spec_pars <- c(valid_dpars(object), "delta") spec_pars <- paste0(spec_pars, collapse = "|") spec_pars <- paste0("^(", spec_pars, ")($|_)") spec_pars <- variables[grepl(spec_pars, variables)] out$spec_pars <- full_summary[spec_pars, , drop = FALSE] # summary of residual correlations rescor_pars <- variables[grepl("^rescor_", variables)] if (length(rescor_pars)) { out$rescor_pars <- full_summary[rescor_pars, , drop = FALSE] rescor_pars <- sub("__", ",", sub("__", "(", rescor_pars)) rownames(out$rescor_pars) <- paste0(rescor_pars, ")") } # summary of autocorrelation effects cor_pars <- variables[grepl(regex_autocor_pars(), variables)] out$cor_pars <- full_summary[cor_pars, , drop = FALSE] rownames(out$cor_pars) <- cor_pars # summary of group-level effects for (g in out$group) { gregex <- escape_dot(g) sd_prefix <- paste0("^sd_", gregex, "__") sd_pars <- variables[grepl(sd_prefix, variables)] cor_prefix <- paste0("^cor_", gregex, "__") cor_pars <- variables[grepl(cor_prefix, variables)] df_prefix <- paste0("^df_", gregex, "$") df_pars <- variables[grepl(df_prefix, variables)] gpars <- c(df_pars, sd_pars, cor_pars) out$random[[g]] <- full_summary[gpars, , drop = FALSE] if (has_rows(out$random[[g]])) { sd_names <- sub(sd_prefix, "sd(", sd_pars) cor_names <- sub(cor_prefix, "cor(", cor_pars) cor_names <- sub("__", ",", cor_names) df_names <- sub(df_prefix, "df", df_pars) gnames <- c(df_names, paste0(c(sd_names, cor_names), ")")) rownames(out$random[[g]]) <- gnames } } # summary of smooths sm_pars <- variables[grepl("^sds_", variables)] if (length(sm_pars)) { out$splines <- full_summary[sm_pars, , drop = FALSE] rownames(out$splines) <- paste0(gsub("^sds_", "sds(", sm_pars), ")") } # summary of monotonic 
parameters mo_pars <- variables[grepl("^simo_", variables)] if (length(mo_pars)) { out$mo <- full_summary[mo_pars, , drop = FALSE] rownames(out$mo) <- gsub("^simo_", "", mo_pars) } # summary of gaussian processes gp_pars <- variables[grepl("^(sdgp|lscale)_", variables)] if (length(gp_pars)) { out$gp <- full_summary[gp_pars, , drop = FALSE] rownames(out$gp) <- gsub("^sdgp_", "sdgp(", rownames(out$gp)) rownames(out$gp) <- gsub("^lscale_", "lscale(", rownames(out$gp)) rownames(out$gp) <- paste0(rownames(out$gp), ")") } out } #' Print a summary for a fitted model represented by a \code{brmsfit} object #' #' @aliases print.brmssummary #' #' @param x An object of class \code{brmsfit} #' @param digits The number of significant digits for printing out the summary; #' defaults to 2. The effective sample size is always rounded to integers. #' @param ... Additional arguments that would be passed #' to method \code{summary} of \code{brmsfit}. #' #' @seealso \code{\link{summary.brmsfit}} #' #' @export print.brmsfit <- function(x, digits = 2, ...) { print(summary(x, ...), digits = digits, ...) } #' @export print.brmssummary <- function(x, digits = 2, ...) { cat(" Family: ") cat(summarise_families(x$formula), "\n") cat(" Links: ") cat(summarise_links(x$formula, wsp = 9), "\n") cat("Formula: ") print(x$formula, wsp = 9) cat(paste0( " Data: ", x$data_name, " (Number of observations: ", x$nobs, ") \n" )) if (!isTRUE(nzchar(x$sampler))) { cat("\nThe model does not contain posterior draws.\n") } else { total_ndraws <- ceiling((x$iter - x$warmup) / x$thin * x$chains) cat(paste0( " Draws: ", x$chains, " chains, each with iter = ", x$iter, "; warmup = ", x$warmup, "; thin = ", x$thin, ";\n", " total post-warmup draws = ", total_ndraws, "\n\n" )) if (nrow(x$prior)) { cat("Priors: \n") print(x$prior, show_df = FALSE) cat("\n") } if (length(x$splines)) { cat("Smooth Terms: \n") print_format(x$splines, digits) cat("\n") } if (length(x$gp)) { cat("Gaussian Process Terms: \n") print_format(x$gp, digits) cat("\n") } if (nrow(x$cor_pars)) { cat("Correlation Structures:\n") # TODO: better printing for correlation structures? print_format(x$cor_pars, digits) cat("\n") } if (length(x$random)) { cat("Group-Level Effects: \n") for (i in seq_along(x$random)) { g <- names(x$random)[i] cat(paste0("~", g, " (Number of levels: ", x$ngrps[[g]], ") \n")) print_format(x$random[[g]], digits) cat("\n") } } if (nrow(x$fixed)) { cat("Population-Level Effects: \n") print_format(x$fixed, digits) cat("\n") } if (length(x$mo)) { cat("Simplex Parameters: \n") print_format(x$mo, digits) cat("\n") } if (nrow(x$spec_pars)) { cat("Family Specific Parameters: \n") print_format(x$spec_pars, digits) cat("\n") } if (length(x$rescor_pars)) { cat("Residual Correlations: \n") print_format(x$rescor, digits) cat("\n") } cat(paste0("Draws were sampled using ", x$sampler, ". ")) if (x$algorithm == "sampling") { cat(paste0( "For each parameter, Bulk_ESS\n", "and Tail_ESS are effective sample size measures, ", "and Rhat is the potential\n", "scale reduction factor on split chains ", "(at convergence, Rhat = 1)." 
)) } cat("\n") } invisible(x) } # helper function to print summary matrices in nice format # also displays -0.00 as a result of round negative values to zero (#263) # @param x object to be printed; coerced to matrix # @param digits number of digits to show # @param no_digits names of columns for which no digits should be shown print_format <- function(x, digits = 2, no_digits = c("Bulk_ESS", "Tail_ESS")) { x <- as.matrix(x) digits <- as.numeric(digits) if (length(digits) != 1L) { stop2("'digits' should be a single numeric value.") } out <- x fmt <- paste0("%.", digits, "f") for (i in seq_cols(x)) { if (isTRUE(colnames(x)[i] %in% no_digits)) { out[, i] <- sprintf("%.0f", x[, i]) } else { out[, i] <- sprintf(fmt, x[, i]) } } print(out, quote = FALSE, right = TRUE) invisible(x) } # regex to extract population-level coefficients fixef_pars <- function() { types <- c("", "s", "cs", "sp", "mo", "me", "mi", "m") types <- paste0("(", types, ")", collapse = "|") paste0("^b(", types, ")_") } # algorithm used in the model fitting algorithm <- function(x) { stopifnot(is.brmsfit(x)) if (is.null(x$algorithm)) "sampling" else x$algorithm } #' Summarize Posterior draws #' #' Summarizes posterior draws based on point estimates (mean or median), #' estimation errors (SD or MAD) and quantiles. This function mainly exists to #' retain backwards compatibility. It will eventually be replaced by functions #' of the \pkg{posterior} package (see examples below). #' #' @param x An \R object. #' @inheritParams as.matrix.brmsfit #' @param probs The percentiles to be computed by the #' \code{\link[stats:quantile]{quantile}} function. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' @param ... More arguments passed to or from other methods. #' #' @return A matrix where rows indicate variables #' and columns indicate the summary estimates. #' #' @seealso \code{\link[posterior:summarize_draws]{summarize_draws}} #' #' @examples #' \dontrun{ #' fit <- brm(time ~ age * sex, data = kidney) #' posterior_summary(fit) #' #' # recommended workflow using posterior #' library(posterior) #' draws <- as_draws_array(fit) #' summarise_draws(draws, default_summary_measures()) #' } #' #' @export posterior_summary <- function(x, ...) { UseMethod("posterior_summary") } #' @rdname posterior_summary #' @export posterior_summary.default <- function(x, probs = c(0.025, 0.975), robust = FALSE, ...) 
{ # TODO: replace with summary functions from posterior # TODO: find a way to represent 3D summaries as well if (!length(x)) { stop2("No posterior draws supplied.") } if (robust) { coefs <- c("median", "mad", "quantile") } else { coefs <- c("mean", "sd", "quantile") } .posterior_summary <- function(x) { do_call(cbind, lapply( coefs, get_estimate, draws = x, probs = probs, na.rm = TRUE )) } if (length(dim(x)) <= 2L) { # data.frames cause trouble in as.array x <- as.matrix(x) } else { x <- as.array(x) } if (length(dim(x)) == 2L) { out <- .posterior_summary(x) rownames(out) <- colnames(x) } else if (length(dim(x)) == 3L) { out <- lapply(array2list(x), .posterior_summary) out <- abind(out, along = 3) dnx <- dimnames(x) dimnames(out) <- list(dnx[[2]], dimnames(out)[[2]], dnx[[3]]) } else { stop("'x' must be of dimension 2 or 3.") } # TODO: align names with summary outputs of other methods and packages colnames(out) <- c("Estimate", "Est.Error", paste0("Q", probs * 100)) out } #' @rdname posterior_summary #' @export posterior_summary.brmsfit <- function(x, pars = NA, variable = NULL, probs = c(0.025, 0.975), robust = FALSE, ...) { out <- as.matrix(x, pars = pars, variable = variable, ...) posterior_summary(out, probs = probs, robust = robust, ...) } # calculate estimates over posterior draws # @param coef coefficient to be applied on the draws (e.g., "mean") # @param draws the draws over which to apply coef # @param margin see 'apply' # @param ... additional arguments passed to get(coef) # @return typically a matrix with colnames(draws) as colnames get_estimate <- function(coef, draws, margin = 2, ...) { # TODO: replace with summary functions from posterior dots <- list(...) args <- list(X = draws, MARGIN = margin, FUN = coef) fun_args <- names(formals(coef)) if (!"..." %in% fun_args) { dots <- dots[names(dots) %in% fun_args] } x <- do_call(apply, c(args, dots)) if (is.null(dim(x))) { x <- matrix(x, dimnames = list(NULL, coef)) } else if (coef == "quantile") { x <- aperm(x, length(dim(x)):1) } x } # validate bounds of credible intervals # @return a numeric vector of length 2 validate_ci_bounds <- function(prob, probs = NULL) { if (!is.null(probs)) { # deprecated as of version 2.13.7 warning2("Argument 'probs' is deprecated. Please use 'prob' instead.") if (length(probs) != 2L) { stop2("Arguments 'probs' must be of length 2.") } probs <- as.numeric(probs) } else { prob <- as_one_numeric(prob) if (prob < 0 || prob > 1) { stop2("'prob' must be a single numeric value in [0, 1].") } probs <- c((1 - prob) / 2, 1 - (1 - prob) / 2) } probs } #' Table Creation for Posterior Draws #' #' Create a table for unique values of posterior draws. #' This is usually only useful when summarizing predictions #' of ordinal models. #' #' @param x A matrix of posterior draws where rows #' indicate draws and columns indicate parameters. #' @param levels Optional values of possible posterior values. #' Defaults to all unique values in \code{x}. #' #' @return A matrix where rows indicate parameters #' and columns indicate the unique values of #' posterior draws. 
#' #' @examples #' \dontrun{ #' fit <- brm(rating ~ period + carry + treat, #' data = inhaler, family = cumulative()) #' pr <- predict(fit, summary = FALSE) #' posterior_table(pr) #' } #' #' @export posterior_table <- function(x, levels = NULL) { x <- as.matrix(x) if (anyNA(x)) { warning2("NAs will be ignored in 'posterior_table'.") } if (is.null(levels)) { levels <- sort(unique(as.vector(x))) } xlevels <- attr(x, "levels") if (length(xlevels) != length(levels)) { xlevels <- levels } out <- lapply(seq_len(ncol(x)), function(n) table(factor(x[, n], levels = levels)) ) out <- do_call(rbind, out) # compute relative frequencies out <- out / rowSums(out) rownames(out) <- colnames(x) colnames(out) <- paste0("P(Y = ", xlevels, ")") out } #' Compute posterior uncertainty intervals #' #' Compute posterior uncertainty intervals for \code{brmsfit} objects. #' #' @param object An object of class \code{brmsfit}. #' @param prob A value between 0 and 1 indicating the desired probability #' to be covered by the uncertainty intervals. The default is 0.95. #' @inheritParams as.matrix.brmsfit #' @param ... More arguments passed to \code{\link{as.matrix.brmsfit}}. #' #' @return A \code{matrix} with lower and upper interval bounds #' as columns and as many rows as selected variables. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt, #' data = epilepsy, family = negbinomial()) #' posterior_interval(fit) #' } #' #' @aliases posterior_interval #' @method posterior_interval brmsfit #' @export #' @export posterior_interval #' @importFrom rstantools posterior_interval posterior_interval.brmsfit <- function( object, pars = NA, variable = NULL, prob = 0.95, ... ) { ps <- as.matrix(object, pars = pars, variable = variable, ...) rstantools::posterior_interval(ps, prob = prob) } #' Extract Priors of a Bayesian Model Fitted with \pkg{brms} #' #' @aliases prior_summary #' #' @param object An object of class \code{brmsfit}. #' @param all Logical; Show all parameters in the model which may have #' priors (\code{TRUE}) or only those with proper priors (\code{FALSE})? #' @param ... Further arguments passed to or from other methods. #' #' @return For \code{brmsfit} objects, an object of class \code{brmsprior}. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|obs), #' data = epilepsy, family = poisson(), #' prior = c(prior(student_t(5,0,10), class = b), #' prior(cauchy(0,2), class = sd))) #' #' prior_summary(fit) #' prior_summary(fit, all = FALSE) #' print(prior_summary(fit, all = FALSE), show_df = FALSE) #' } #' #' @method prior_summary brmsfit #' @export #' @export prior_summary #' @importFrom rstantools prior_summary prior_summary.brmsfit <- function(object, all = TRUE, ...) { object <- restructure(object) prior <- object$prior if (!all) { prior <- prior[nzchar(prior$prior), ] } prior } brms/R/make_stancode.R0000644000175000017500000003406214136715152014502 0ustar nileshnilesh#' Stan Code for \pkg{brms} Models #' #' Generate Stan code for \pkg{brms} models #' #' @inheritParams brm #' @param ... Other arguments for internal usage only. #' #' @return A character string containing the fully commented \pkg{Stan} code #' to fit a \pkg{brms} model. 
#' #' @examples #' make_stancode(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' #' make_stancode(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' #' @export make_stancode <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sparse = NULL, sample_prior = "no", stanvars = NULL, stan_funs = NULL, knots = NULL, threads = NULL, normalize = getOption("brms.normalize", TRUE), save_model = NULL, ...) { if (is.brmsfit(formula)) { stop2("Use 'stancode' to extract Stan code from 'brmsfit' objects.") } formula <- validate_formula( formula, data = data, family = family, autocor = autocor, sparse = sparse, cov_ranef = cov_ranef ) bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data <- validate_data( data, bterms = bterms, data2 = data2, knots = knots ) prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior ) stanvars <- validate_stanvars(stanvars, stan_funs = stan_funs) threads <- validate_threads(threads) .make_stancode( bterms, data = data, prior = prior, stanvars = stanvars, threads = threads, normalize = normalize, save_model = save_model, ... ) } # internal work function of 'make_stancode' # @param parse parse the Stan model for automatic syntax checking # @param backend name of the backend used for parsing # @param silent silence parsing messages .make_stancode <- function(bterms, data, prior, stanvars, threads = threading(), normalize = getOption("brms.normalize", TRUE), parse = getOption("brms.parse_stancode", FALSE), backend = getOption("brms.backend", "rstan"), silent = TRUE, save_model = NULL, ...) 
{ normalize <- as_one_logical(normalize) parse <- as_one_logical(parse) backend <- match.arg(backend, backend_choices()) silent <- as_one_logical(silent) ranef <- tidy_ranef(bterms, data = data) meef <- tidy_meef(bterms, data = data) scode_predictor <- stan_predictor( bterms, data = data, prior = prior, normalize = normalize, ranef = ranef, meef = meef, stanvars = stanvars, threads = threads ) scode_ranef <- stan_re( ranef, prior = prior, threads = threads, normalize = normalize ) scode_Xme <- stan_Xme( meef, prior = prior, threads = threads, normalize = normalize ) scode_global_defs <- stan_global_defs( bterms, prior = prior, ranef = ranef, threads = threads ) # extend Stan's likelihood part if (use_threading(threads)) { # threading is activated for (i in seq_along(scode_predictor)) { resp <- usc(names(scode_predictor)[i]) pll_args <- stan_clean_pll_args( scode_predictor[[i]][["pll_args"]], scode_ranef[["pll_args"]], scode_Xme[["pll_args"]], collapse_stanvars_pll_args(stanvars) ) partial_log_lik <- paste0( scode_predictor[[i]][["pll_def"]], scode_predictor[[i]][["model_def"]], collapse_stanvars(stanvars, "likelihood", "start"), scode_predictor[[i]][["model_comp_basic"]], scode_predictor[[i]][["model_comp_eta_loop"]], scode_predictor[[i]][["model_comp_dpar_link"]], scode_predictor[[i]][["model_comp_mu_link"]], scode_predictor[[i]][["model_comp_dpar_trans"]], scode_predictor[[i]][["model_comp_mix"]], scode_predictor[[i]][["model_comp_arma"]], scode_predictor[[i]][["model_comp_catjoin"]], scode_predictor[[i]][["model_comp_mvjoin"]], scode_predictor[[i]][["model_log_lik"]], collapse_stanvars(stanvars, "likelihood", "end") ) partial_log_lik <- gsub(" target \\+=", " ptarget +=", partial_log_lik) partial_log_lik <- paste0( "// compute partial sums of the log-likelihood\n", "real partial_log_lik_lpmf", resp, "(int[] seq", resp, ", int start, int end", pll_args$typed, ") {\n", " real ptarget = 0;\n", " int N = end - start + 1;\n", partial_log_lik, " return ptarget;\n", "}\n" ) partial_log_lik <- wsp_per_line(partial_log_lik, 2) scode_predictor[[i]][["partial_log_lik"]] <- partial_log_lik static <- str_if(threads$static, "_static") scode_predictor[[i]][["model_lik"]] <- paste0( " target += reduce_sum", static, "(partial_log_lik_lpmf", resp, ", seq", resp, ", grainsize", pll_args$plain, ");\n" ) str_add(scode_predictor[[i]][["tdata_def"]]) <- glue( " int seq{resp}[N{resp}] = sequence(1, N{resp});\n" ) } scode_predictor <- collapse_lists(ls = scode_predictor) scode_predictor[["model_lik"]] <- paste0( scode_predictor[["model_no_pll_def"]], scode_predictor[["model_no_pll_comp_basic"]], scode_predictor[["model_no_pll_comp_mvjoin"]], scode_predictor[["model_lik"]] ) str_add(scode_predictor[["data"]]) <- " int grainsize; // grainsize for threading\n" } else { # threading is not activated scode_predictor <- collapse_lists(ls = scode_predictor) scode_predictor[["model_lik"]] <- paste0( scode_predictor[["model_no_pll_def"]], scode_predictor[["model_def"]], collapse_stanvars(stanvars, "likelihood", "start"), scode_predictor[["model_no_pll_comp_basic"]], scode_predictor[["model_comp_basic"]], scode_predictor[["model_comp_eta_loop"]], scode_predictor[["model_comp_dpar_link"]], scode_predictor[["model_comp_mu_link"]], scode_predictor[["model_comp_dpar_trans"]], scode_predictor[["model_comp_mix"]], scode_predictor[["model_comp_arma"]], scode_predictor[["model_comp_catjoin"]], scode_predictor[["model_no_pll_comp_mvjoin"]], scode_predictor[["model_comp_mvjoin"]], scode_predictor[["model_log_lik"]], 
collapse_stanvars(stanvars, "likelihood", "end") ) } scode_predictor[["model_lik"]] <- wsp_per_line(scode_predictor[["model_lik"]], 2) # get priors for all parameters in the model scode_prior <- paste0( scode_predictor[["prior"]], scode_ranef[["prior"]], scode_Xme[["prior"]], stan_unchecked_prior(prior) ) # generate functions block scode_functions <- paste0( "// generated with brms ", utils::packageVersion("brms"), "\n", "functions {\n", scode_global_defs[["fun"]], collapse_stanvars(stanvars, "functions"), scode_predictor[["partial_log_lik"]], "}\n" ) # generate data block scode_data <- paste0( "data {\n", " int N; // total number of observations\n", scode_predictor[["data"]], scode_ranef[["data"]], scode_Xme[["data"]], " int prior_only; // should the likelihood be ignored?\n", collapse_stanvars(stanvars, "data"), "}\n" ) # generate transformed parameters block scode_transformed_data <- paste0( "transformed data {\n", scode_global_defs[["tdata_def"]], scode_predictor[["tdata_def"]], collapse_stanvars(stanvars, "tdata", "start"), scode_predictor[["tdata_comp"]], collapse_stanvars(stanvars, "tdata", "end"), "}\n" ) # generate parameters block scode_parameters <- paste0( scode_predictor[["par"]], scode_ranef[["par"]], scode_Xme[["par"]] ) # prepare additional sampling from priors scode_rngprior <- stan_rngprior( prior = scode_prior, par_declars = scode_parameters, gen_quantities = scode_predictor[["gen_def"]], prior_special = attr(prior, "special"), sample_prior = get_sample_prior(prior) ) scode_parameters <- paste0( "parameters {\n", scode_parameters, scode_rngprior[["par"]], collapse_stanvars(stanvars, "parameters"), "}\n" ) # generate transformed parameters block scode_transformed_parameters <- paste0( "transformed parameters {\n", scode_predictor[["tpar_def"]], scode_ranef[["tpar_def"]], scode_Xme[["tpar_def"]], collapse_stanvars(stanvars, "tparameters", "start"), scode_predictor[["tpar_prior"]], scode_ranef[["tpar_prior"]], scode_Xme[["tpar_prior"]], scode_predictor[["tpar_comp"]], scode_predictor[["tpar_reg_prior"]], scode_ranef[["tpar_comp"]], scode_Xme[["tpar_comp"]], collapse_stanvars(stanvars, "tparameters", "end"), "}\n" ) # combine likelihood with prior part not_const <- str_if(!normalize, " not") scode_model <- paste0( "model {\n", collapse_stanvars(stanvars, "model", "start"), " // likelihood", not_const, " including constants\n", " if (!prior_only) {\n", scode_predictor[["model_lik"]], " }\n", " // priors", not_const, " including constants\n", scode_prior, collapse_stanvars(stanvars, "model", "end"), "}\n" ) # generate generated quantities block scode_generated_quantities <- paste0( "generated quantities {\n", scode_predictor[["gen_def"]], scode_ranef[["gen_def"]], scode_Xme[["gen_def"]], scode_rngprior[["gen_def"]], collapse_stanvars(stanvars, "genquant", "start"), scode_predictor[["gen_comp"]], scode_ranef[["gen_comp"]], scode_rngprior[["gen_comp"]], scode_Xme[["gen_comp"]], collapse_stanvars(stanvars, "genquant", "end"), "}\n" ) # combine all elements into a complete Stan model scode <- paste0( scode_functions, scode_data, scode_transformed_data, scode_parameters, scode_transformed_parameters, scode_model, scode_generated_quantities ) scode <- expand_include_statements(scode) if (parse) { scode <- parse_model(scode, backend, silent = silent) } if (is.character(save_model)) { cat(scode, file = save_model) } class(scode) <- c("character", "brmsmodel") scode } #' @export print.brmsmodel <- function(x, ...) 
{ cat(x) invisible(x) } #' Extract Stan model code #' #' Extract Stan code that was used to specify the model. #' #' @aliases stancode.brmsfit #' #' @param object An object of class \code{brmsfit}. #' @param version Logical; indicates if the first line containing #' the \pkg{brms} version number should be included. #' Defaults to \code{TRUE}. #' @param regenerate Logical; indicates if the Stan code should #' be regenerated with the current \pkg{brms} version. #' By default, \code{regenerate} will be \code{FALSE} unless required #' to be \code{TRUE} by other arguments. #' @param threads Controls whether the Stan code should be threaded. #' See \code{\link{threading}} for details. #' @param ... Further arguments passed to \code{\link{make_stancode}} if the #' Stan code is regenerated. #' #' @return Stan model code for further processing. #' #' @export stancode.brmsfit <- function(object, version = TRUE, regenerate = NULL, threads = NULL, ...) { if (is.null(regenerate)) { # determine whether regenerating the Stan code is required regenerate <- FALSE cl <- match.call() if ("threads" %in% names(cl)) { threads <- validate_threads(threads) if (use_threading(threads) && !use_threading(object$threads) || !use_threading(threads) && use_threading(object$threads)) { # threading changed; regenerated Stan code regenerate <- TRUE } object$threads <- threads } } regenerate <- as_one_logical(regenerate) if (regenerate) { object <- restructure(object) out <- make_stancode( formula = object$formula, data = object$data, prior = object$prior, data2 = object$data2, stanvars = object$stanvars, sample_prior = get_sample_prior(object$prior), threads = object$threads, ... ) } else { # extract Stan code unaltered out <- object$model } if (!version) { out <- sub("^[^\n]+[[:digit:]]\\.[^\n]+\n", "", out) } out } #' @rdname stancode.brmsfit #' @export stancode <- function(object, ...) { UseMethod("stancode") } # expand '#include' statements # This could also be done automatically by Stan at compilation time # but would result in Stan code that is not self-contained until compilation # @param model Stan code potentially including '#include' statements # @return Stan code with '#include' statements expanded expand_include_statements <- function(model) { path <- system.file("chunks", package = "brms") includes <- get_matches("#include '[^']+'", model) # removal of duplicates could make code generation easier in the future includes <- unique(includes) files <- gsub("(#include )|(')", "", includes) for (i in seq_along(includes)) { code <- readLines(paste0(path, "/", files[i])) code <- paste0(code, collapse = "\n") pattern <- paste0(" *", escape_all(includes[i])) model <- sub(pattern, code, model) } model } # check if Stan code includes normalization constants is_normalized <- function(stancode) { !grepl("_lup(d|m)f\\(", stancode) } # Normalizes Stan code to avoid triggering refit after whitespace and # comment changes in the generated code. # In some distant future, StanC3 may provide its own normalizing functions, # until then this is a set of regex hacks. 
# @param x a string containing the Stan code normalize_stancode <- function(x) { x <- as_one_character(x) # Remove single-line comments x <- gsub("//[^\n\r]*[\n\r]", " ", x) x <- gsub("//[^\n\r]*$", " ", x) # Remove multi-line comments x <- gsub("/\\*([^*]*(\\*[^/])?)*\\*/", " ", x) # Standardize whitespace (including newlines) x <- gsub("[[:space:]]+"," ", x) trimws(x) } brms/R/stan-response.R0000644000175000017500000005753214116452545014517 0ustar nileshnilesh# unless otherwise specifiedm functions return a named list # of Stan code snippets to be pasted together later on # Stan code for the response variables stan_response <- function(bterms, data, normalize) { stopifnot(is.brmsterms(bterms)) lpdf <- stan_lpdf_name(normalize) family <- bterms$family rtype <- str_if(use_int(family), "int", "real") multicol <- has_multicol(family) px <- check_prefix(bterms) resp <- usc(combine_prefix(px)) out <- list(resp_type = rtype) if (nzchar(resp)) { # global N is defined elsewhere str_add(out$data) <- glue( " int N{resp}; // number of observations\n" ) str_add(out$pll_def) <- glue( " int N{resp} = end - start + 1;\n" ) } if (has_cat(family)) { str_add(out$data) <- glue( " int ncat{resp}; // number of categories\n" ) str_add(out$pll_args) <- glue(", data int ncat{resp}") } if (has_multicol(family)) { if (rtype == "real") { str_add(out$data) <- glue( " vector[ncat{resp}] Y{resp}[N{resp}]; // response array\n" ) str_add(out$pll_args) <- glue(", data vector[] Y{resp}") } else if (rtype == "int") { str_add(out$data) <- glue( " int Y{resp}[N{resp}, ncat{resp}]; // response array\n" ) str_add(out$pll_args) <- glue(", data int[,] Y{resp}") } } else { if (rtype == "real") { # type vector (instead of real[]) is required by some PDFs str_add(out$data) <- glue( " vector[N{resp}] Y{resp}; // response variable\n" ) str_add(out$pll_args) <- glue(", data vector Y{resp}") } else if (rtype == "int") { str_add(out$data) <- glue( " int Y{resp}[N{resp}]; // response variable\n" ) str_add(out$pll_args) <- glue(", data int[] Y{resp}") } } if (has_ndt(family)) { str_add(out$tdata_def) <- glue( " real min_Y{resp} = min(Y{resp});\n" ) } if (has_trials(family) || is.formula(bterms$adforms$trials)) { str_add(out$data) <- glue( " int trials{resp}[N{resp}]; // number of trials\n" ) str_add(out$pll_args) <- glue(", data int[] trials{resp}") } if (is.formula(bterms$adforms$weights)) { str_add(out$data) <- glue( " vector[N{resp}] weights{resp}; // model weights\n" ) str_add(out$pll_args) <- glue(", data vector weights{resp}") } if (has_thres(family)) { groups <- get_thres_groups(family) if (any(nzchar(groups))) { str_add(out$data) <- glue( " int ngrthres{resp}; // number of threshold groups\n", " int nthres{resp}[ngrthres{resp}]; // number of thresholds\n", " int Jthres{resp}[N{resp}, 2]; // threshold indices\n" ) str_add(out$tdata_def) <- glue( " int nmthres{resp} = sum(nthres{resp});", " // total number of thresholds\n", " int Kthres_start{resp}[ngrthres{resp}];", " // start index per threshold group\n", " int Kthres_end{resp}[ngrthres{resp}];", " // end index per threshold group\n" ) str_add(out$tdata_comp) <- glue( " Kthres_start{resp}[1] = 1;\n", " Kthres_end{resp}[1] = nthres{resp}[1];\n", " for (i in 2:ngrthres{resp}) {{\n", " Kthres_start{resp}[i] = Kthres_end{resp}[i-1] + 1;\n", " Kthres_end{resp}[i] = Kthres_end{resp}[i-1] + nthres{resp}[i];\n", " }}\n" ) str_add(out$pll_args) <- glue( ", data int[] nthres{resp}, data int[,] Jthres{resp}" ) } else { str_add(out$data) <- glue( " int nthres{resp}; // number of thresholds\n" 
) str_add(out$pll_args) <- glue(", data int nthres{resp}") } } if (is.formula(bterms$adforms$se)) { str_add(out$data) <- glue( " vector[N{resp}] se{resp}; // known sampling error\n" ) str_add(out$tdata_def) <- glue( " vector[N{resp}] se2{resp} = square(se{resp});\n" ) str_add(out$pll_args) <- glue( ", data vector se{resp}, data vector se2{resp}" ) } if (is.formula(bterms$adforms$dec)) { str_add(out$data) <- glue( " int dec{resp}[N{resp}]; // decisions\n" ) str_add(out$pll_args) <- glue(", data int[] dec{resp}") } if (is.formula(bterms$adforms$rate)) { str_add(out$data) <- glue( " vector[N{resp}] denom{resp};", " // response denominator\n" ) str_add(out$tdata_def) <- glue( " // log response denominator\n", " vector[N{resp}] log_denom{resp} = log(denom{resp});\n" ) str_add(out$pll_args) <- glue( ", data vector denom{resp}, data vector log_denom{resp}" ) } if (is.formula(bterms$adforms$cens)) { cens <- eval_rhs(bterms$adforms$cens) str_add(out$data) <- glue( " int cens{resp}[N{resp}]; // indicates censoring\n" ) str_add(out$pll_args) <- glue(", data int[] cens{resp}") if (cens$vars$y2 != "NA") { # interval censoring is required if (rtype == "int") { str_add(out$data) <- glue( " int rcens{resp}[N{resp}];" ) str_add(out$pll_args) <- glue(", data int[] rcens{resp}") } else { str_add(out$data) <- glue( " vector[N{resp}] rcens{resp};" ) str_add(out$pll_args) <- glue(", data vector rcens{resp}") } str_add(out$data) <- " // right censor points for interval censoring\n" } } bounds <- trunc_bounds(bterms, data = data) if (any(bounds$lb > -Inf)) { str_add(out$data) <- glue( " {rtype} lb{resp}[N{resp}]; // lower truncation bounds;\n" ) str_add(out$pll_args) <- glue(", data {rtype}[] lb{resp}") } if (any(bounds$ub < Inf)) { str_add(out$data) <- glue( " {rtype} ub{resp}[N{resp}]; // upper truncation bounds\n" ) str_add(out$pll_args) <- glue(", data {rtype}[] ub{resp}") } if (is.formula(bterms$adforms$mi)) { # TODO: pass 'Ybounds' via 'standata' instead of hardcoding them Ybounds <- trunc_bounds(bterms, data, incl_family = TRUE, stan = TRUE) sdy <- get_sdy(bterms, data) if (is.null(sdy)) { # response is modeled without measurement error str_add(out$data) <- glue( " int Nmi{resp}; // number of missings\n", " int Jmi{resp}[Nmi{resp}]; // positions of missings\n" ) str_add(out$par) <- glue( " vector{Ybounds}[Nmi{resp}] Ymi{resp}; // estimated missings\n" ) str_add(out$model_no_pll_def) <- glue( " // vector combining observed and missing responses\n", " vector[N{resp}] Yl{resp} = Y{resp};\n" ) str_add(out$model_no_pll_comp_basic) <- glue( " Yl{resp}[Jmi{resp}] = Ymi{resp};\n" ) str_add(out$pll_args) <- glue(", vector Yl{resp}") } else { str_add(out$data) <- glue( " // data for measurement-error in the response\n", " vector[N{resp}] noise{resp};\n", " // information about non-missings\n", " int Nme{resp};\n", " int Jme{resp}[Nme{resp}];\n" ) str_add(out$par) <- glue( " vector{Ybounds}[N{resp}] Yl{resp}; // latent variable\n" ) str_add(out$prior) <- glue( " target += normal_{lpdf}(Y{resp}[Jme{resp}]", " | Yl{resp}[Jme{resp}], noise{resp}[Jme{resp}]);\n" ) str_add(out$pll_args) <- glue(", vector Yl{resp}") } } if (is.formula(bterms$adforms$vreal)) { # vectors of real values for use in custom families vreal <- eval_rhs(bterms$adforms$vreal) k <- length(vreal$vars) str_add(out$data) <- cglue( " // data for custom real vectors\n", " real vreal{seq_len(k)}{resp}[N{resp}];\n" ) str_add(out$pll_args) <- cglue(", data real[] vreal{seq_len(k)}{resp}") } if (is.formula(bterms$adforms$vint)) { # vectors of integer values 
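# --- Illustrative sketch (not part of the package code) -----------------------
# R-level analogue of the 'Yl[Jmi] = Ymi' statement generated above for mi()
# terms: observed responses are kept and the missing positions are filled with
# the estimated values (all numbers below are made up).
Y   <- c(1.2, NA, 0.7, NA)   # response with missings
Jmi <- which(is.na(Y))       # positions of missings
Ymi <- c(0.5, -0.3)          # hypothetical draws of the estimated missings
Yl  <- Y
Yl[Jmi] <- Ymi
Yl                           # combined observed and estimated responses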
for use in custom families vint <- eval_rhs(bterms$adforms$vint) k <- length(vint$vars) str_add(out$data) <- cglue( " // data for custom integer vectors\n", " int vint{seq_len(k)}{resp}[N{resp}];\n" ) str_add(out$pll_args) <- cglue(", data int[] vint{seq_len(k)}{resp}") } out } # Stan code for ordinal thresholds # intercepts in ordinal models require special treatment # and must be present even when using non-linear predictors # thus the relevant Stan code cannot be part of 'stan_fe' stan_thres <- function(bterms, data, prior, normalize, ...) { stopifnot(is.btl(bterms) || is.btnl(bterms)) out <- list() if (!is_ordinal(bterms)) { return(out) } px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) type <- str_if(has_ordered_thres(bterms), "ordered", "vector") coef_type <- str_if(has_ordered_thres(bterms), "", "real") gr <- grb <- "" groups <- get_thres_groups(bterms) if (has_thres_groups(bterms)) { # include one threshold vector per group gr <- usc(seq_along(groups)) grb <- paste0("[", seq_along(groups), "]") } if (fix_intercepts(bterms)) { # identify ordinal mixtures by fixing their thresholds to the same values if (has_equidistant_thres(bterms)) { stop2("Cannot use equidistant and fixed thresholds at the same time.") } # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- " // ordinal thresholds\n" str_add(out$tpar_def) <- cglue( " {type}[nthres{resp}{grb}] Intercept{p}{gr};\n" ) str_add(out$tpar_comp) <- " // fix thresholds across ordinal mixture components\n" str_add(out$tpar_comp) <- cglue( " Intercept{p}{gr} = fixed_Intercept{resp}{gr};\n" ) } else { if (has_equidistant_thres(bterms)) { bound <- subset2(prior, class = "delta", group = "", ls = px)$bound for (i in seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", group = groups[i], type = "real", prefix = "first_", suffix = glue("{p}{gr[i]}"), px = px, comment = "first threshold", normalize = normalize ) str_add_list(out) <- stan_prior( prior, class = "delta", group = groups[i], type = glue("real{bound}"), px = px, suffix = gr[i], comment = "distance between thresholds", normalize = normalize ) } str_add(out$tpar_def) <- " // temporary thresholds for centered predictors\n" str_add(out$tpar_def) <- cglue( " {type}[nthres{resp}{grb}] Intercept{p}{gr};\n" ) str_add(out$tpar_comp) <- " // compute equidistant thresholds\n" str_add(out$tpar_comp) <- cglue( " for (k in 1:(nthres{resp}{grb})) {{\n", " Intercept{p}{gr}[k] = first_Intercept{p}{gr}", " + (k - 1.0) * delta{p}{gr};\n", " }}\n" ) } else { for (i in seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", group = groups[i], coef = get_thres(bterms, group = groups[i]), type = glue("{type}[nthres{resp}{grb[i]}]"), coef_type = coef_type, px = px, suffix = glue("{p}{gr[i]}"), comment = "temporary thresholds for centered predictors", normalize = normalize ) } } } stz <- "" if (has_sum_to_zero_thres(bterms)) { stz <- "_stz" str_add(out$tpar_def) <- cglue( " vector[nthres{resp}{grb}] Intercept{p}_stz{gr};", " // sum-to-zero constraint thresholds\n" ) str_add(out$tpar_comp) <- " // compute sum-to-zero constraint thresholds\n" str_add(out$tpar_comp) <- cglue( " Intercept{p}_stz{gr} = Intercept{p}{gr} - mean(Intercept{p}{gr});\n" ) } if (has_thres_groups(bterms)) { # merge all group specific thresholds into one vector str_add(out$tpar_def) <- glue( " vector[nmthres{resp}] merged_Intercept{p}{stz}; // merged thresholds\n" ) str_add(out$tpar_comp) <- " // merge thresholds\n" grj 
<- seq_along(groups) grj <- glue("Kthres_start{resp}[{grj}]:Kthres_end{resp}[{grj}]") str_add(out$tpar_comp) <- cglue( " merged_Intercept{p}{stz}[{grj}] = Intercept{p}{stz}{gr};\n" ) str_add(out$pll_args) <- cglue(", vector merged_Intercept{p}{stz}") } else { str_add(out$pll_args) <- glue(", vector Intercept{p}{stz}") } sub_X_means <- "" if (stan_center_X(bterms) && length(all_terms(bterms$fe))) { # centering of the design matrix improves convergence # ordinal families either use thres - mu or mu - thres # both implies adding to the temporary intercept sub_X_means <- glue(" + dot_product(means_X{p}, b{p})") } str_add(out$gen_def) <- " // compute actual thresholds\n" str_add(out$gen_def) <- cglue( " vector[nthres{resp}{grb}] b{p}_Intercept{gr}", " = Intercept{p}{stz}{gr}{sub_X_means};\n" ) out } # Stan code for the baseline functions of the Cox model stan_bhaz <- function(bterms, prior, threads, normalize, ...) { stopifnot(is.btl(bterms) || is.btnl(bterms)) out <- list() if (!is_cox(bterms$family)) { return(out) } lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) resp <- usc(px$resp) slice <- stan_slice(threads) str_add(out$data) <- glue( " // data for flexible baseline functions\n", " int Kbhaz{resp}; // number of basis functions\n", " // design matrix of the baseline function\n", " matrix[N{resp}, Kbhaz{resp}] Zbhaz{resp};\n", " // design matrix of the cumulative baseline function\n", " matrix[N{resp}, Kbhaz{resp}] Zcbhaz{resp};\n", " // a-priori concentration vector of baseline coefficients\n", " vector[Kbhaz{resp}] con_sbhaz{resp};\n" ) str_add(out$par) <- glue( " simplex[Kbhaz{resp}] sbhaz{resp}; // baseline coefficients\n" ) str_add(out$prior) <- glue( " target += dirichlet_{lpdf}(sbhaz{resp} | con_sbhaz{resp});\n" ) str_add(out$model_def) <- glue( " // compute values of baseline function\n", " vector[N{resp}] bhaz{resp} = Zbhaz{resp}{slice} * sbhaz{resp};\n", " // compute values of cumulative baseline function\n", " vector[N{resp}] cbhaz{resp} = Zcbhaz{resp}{slice} * sbhaz{resp};\n" ) str_add(out$pll_args) <- glue( ", data matrix Zbhaz{resp}, data matrix Zcbhaz{resp}, vector sbhaz{resp}" ) out } # Stan code specific to mixture families stan_mixture <- function(bterms, data, prior, threads, normalize, ...) 
{ out <- list() if (!is.mixfamily(bterms$family)) { return(out) } lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) nmix <- length(bterms$family$mix) theta_pred <- grepl("^theta", names(bterms$dpars)) theta_pred <- bterms$dpars[theta_pred] theta_fix <- grepl("^theta", names(bterms$fdpars)) theta_fix <- bterms$fdpars[theta_fix] def_thetas <- cglue( " real theta{1:nmix}{p}; // mixing proportion\n" ) if (length(theta_pred)) { if (length(theta_pred) != nmix - 1) { stop2("Can only predict all but one mixing proportion.") } missing_id <- setdiff(1:nmix, dpar_id(names(theta_pred))) str_add(out$model_def) <- glue( " vector[N{p}] theta{missing_id}{p} = rep_vector(0.0, N{p});\n", " real log_sum_exp_theta;\n" ) sum_exp_theta <- glue("exp(theta{1:nmix}{p}[n])", collapse = " + ") str_add(out$model_comp_mix) <- glue( " for (n in 1:N{p}) {{\n", " // scale theta to become a probability vector\n", " log_sum_exp_theta = log({sum_exp_theta});\n" ) str_add(out$model_comp_mix) <- cglue( " theta{1:nmix}{p}[n] = theta{1:nmix}{p}[n] - log_sum_exp_theta;\n" ) str_add(out$model_comp_mix) <- " }\n" } else if (length(theta_fix)) { # fix mixture proportions if (length(theta_fix) != nmix) { stop2("Can only fix no or all mixing proportions.") } str_add(out$data) <- " // mixing proportions\n" str_add(out$data) <- cglue( " real theta{1:nmix}{p};\n" ) str_add(out$pll_args) <- cglue(", real theta{1:nmix}{p}") } else { # estimate mixture proportions str_add(out$data) <- glue( " vector[{nmix}] con_theta{p}; // prior concentration\n" ) str_add(out$par) <- glue( " simplex[{nmix}] theta{p}; // mixing proportions\n" ) str_add(out$prior) <- glue( " target += dirichlet_{lpdf}(theta{p} | con_theta{p});\n" ) # separate definition from computation to support fixed parameters str_add(out$tpar_def) <- " // mixing proportions\n" str_add(out$tpar_def) <- cglue( " real theta{1:nmix}{p};\n" ) str_add(out$tpar_comp) <- cglue( " theta{1:nmix}{p} = theta{p}[{1:nmix}];\n" ) str_add(out$pll_args) <- cglue(", real theta{1:nmix}{p}") } if (order_intercepts(bterms)) { # identify mixtures by ordering the intercepts of their components str_add(out$par) <- glue( " ordered[{nmix}] ordered_Intercept{p}; // to identify mixtures\n" ) } if (fix_intercepts(bterms)) { # identify ordinal mixtures by fixing their thresholds to the same values stopifnot(is_ordinal(bterms)) gr <- grb <- "" groups <- get_thres_groups(bterms) if (has_thres_groups(bterms)) { # include one threshold vector per group gr <- usc(seq_along(groups)) grb <- paste0("[", seq_along(groups), "]") } type <- str_if(has_ordered_thres(bterms), "ordered", "vector") coef_type <- str_if(has_ordered_thres(bterms), "", "real") for (i in seq_along(groups)) { str_add_list(out) <- stan_prior( prior, class = "Intercept", coef = get_thres(bterms, group = groups[i]), type = glue("{type}[nthres{p}{grb[i]}]"), coef_type = coef_type, px = px, prefix = "fixed_", suffix = glue("{p}{gr[i]}"), comment = "thresholds fixed over mixture components", normalize = normalize ) } } out } # ordinal log-probability densitiy functions in Stan language # @return a character string stan_ordinal_lpmf <- function(family, link) { stopifnot(is.character(family), is.character(link)) ilink <- stan_ilink(link) th <- function(k) { # helper function generating stan code inside ilink(.) 
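# --- Illustrative R analogue (not part of the package code) -------------------
# What the generated cumulative-logit lpmf computes, written directly in R:
# category probabilities are differences of inverse-logit values of
# disc * (thres[k] - mu). All numbers below are made up.
dcumulative_logit <- function(y, mu, disc, thres) {
  eta <- disc * (thres - mu)
  p <- c(plogis(eta), 1) - c(0, plogis(eta))  # P(Y = k) for k = 1, ..., K
  p[y]
}
dcumulative_logit(2, mu = 0.5, disc = 1, thres = c(-1, 0, 1))
sum(sapply(1:4, dcumulative_logit, mu = 0.5, disc = 1, thres = c(-1, 0, 1)))  # 1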
if (family %in% c("cumulative", "sratio")) { out <- glue("thres[{k}] - mu") } else if (family %in% c("cratio", "acat")) { out <- glue("mu - thres[{k}]") } glue("disc * ({out})") } out <- glue( " /* {family}-{link} log-PDF for a single response\n", " * Args:\n", " * y: response category\n", " * mu: latent mean parameter\n", " * disc: discrimination parameter\n", " * thres: ordinal thresholds\n", " * Returns:\n", " * a scalar to be added to the log posterior\n", " */\n", " real {family}_{link}_lpmf(int y, real mu, real disc, vector thres) {{\n" ) # define the function body if (family == "cumulative") { if (ilink == "inv_logit") { str_add(out) <- glue( " int nthres = num_elements(thres);\n", " if (y == 1) {{\n", " return log_inv_logit({th(1)});\n", " }} else if (y == nthres + 1) {{\n", " return log1m_inv_logit({th('nthres')});\n", " }} else {{\n", # TODO: replace with log_inv_logit_diff once rstan >= 2.25 " return log_diff_exp(\n", " log_inv_logit({th('y')}), \n", " log_inv_logit({th('y - 1')})\n", " );\n", " }}\n", " }}\n" ) } else { str_add(out) <- glue( " int nthres = num_elements(thres);\n", " real p;\n", " if (y == 1) {{\n", " p = {ilink}({th(1)});\n", " }} else if (y == nthres + 1) {{\n", " p = 1 - {ilink}({th('nthres')});\n", " }} else {{\n", " p = {ilink}({th('y')}) -\n", " {ilink}({th('y - 1')});\n", " }}\n", " return log(p);\n", " }}\n" ) } } else if (family %in% c("sratio", "cratio")) { if (ilink == "inv_cloglog") { qk <- str_if( family == "sratio", "-exp({th('k')})", "log1m_exp(-exp({th('k')}))" ) } else if (ilink == "inv_logit") { qk <- str_if( family == "sratio", "log1m_inv_logit({th('k')})", "log_inv_logit({th('k')})" ) } else if (ilink == "Phi") { # TODO: replace with more stable std_normal_lcdf once rstan >= 2.25 qk <- str_if( family == "sratio", "normal_lccdf({th('k')}|0,1)", "normal_lcdf({th('k')}|0,1)" ) } else if (ilink == "Phi_approx") { qk <- str_if( family == "sratio", "log1m_inv_logit(0.07056 * pow({th('k')}, 3.0) + 1.5976 * {th('k')})", "log_inv_logit(0.07056 * pow({th('k')}, 3.0) + 1.5976 * {th('k')})" ) } else if (ilink == "inv_cauchit") { qk <- str_if( family == "sratio", "cauchy_lccdf({th('k')}|0,1)", "cauchy_lcdf({th('k')}|0,1)" ) } qk <- glue(qk) str_add(out) <- glue( " int nthres = num_elements(thres);\n", " vector[nthres + 1] p;\n", " vector[nthres] q;\n", " int k = 1;\n", " while (k <= min(y, nthres)) {{\n", " q[k] = {qk};\n", " p[k] = log1m_exp(q[k]);\n", " for (kk in 1:(k - 1)) p[k] = p[k] + q[kk];\n", " k += 1;\n", " }}\n", " if (y == nthres + 1) {{\n", " p[nthres + 1] = sum(q);\n", " }}\n", " return p[y];\n", " }}\n" ) } else if (family == "acat") { if (ilink == "inv_logit") { str_add(out) <- glue( " int nthres = num_elements(thres);\n", " vector[nthres + 1] p = append_row(0, cumulative_sum(disc * (mu - thres)));\n", " return p[y] - log_sum_exp(p);\n", " }}\n" ) } else { str_add(out) <- glue( " int nthres = num_elements(thres);\n", " vector[nthres + 1] p;\n", " vector[nthres] q;\n", " for (k in 1:(nthres))\n", " q[k] = {ilink}({th('k')});\n", " for (k in 1:(nthres + 1)) {{\n", " p[k] = 1.0;\n", " for (kk in 1:(k - 1)) p[k] = p[k] * q[kk];\n", " for (kk in k:(nthres)) p[k] = p[k] * (1 - q[kk]);\n", " }}\n", " return log(p[y] / sum(p));\n", " }}\n" ) } } # lpmf function for multiple merged thresholds str_add(out) <- glue( " /* {family}-{link} log-PDF for a single response and merged thresholds\n", " * Args:\n", " * y: response category\n", " * mu: latent mean parameter\n", " * disc: discrimination parameter\n", " * thres: vector of merged ordinal 
thresholds\n", " * j: start and end index for the applid threshold within 'thres'\n", " * Returns:\n", " * a scalar to be added to the log posterior\n", " */\n", " real {family}_{link}_merged_lpmf(", "int y, real mu, real disc, vector thres, int[] j) {{\n", " return {family}_{link}_lpmf(y | mu, disc, thres[j[1]:j[2]]);\n", " }}\n" ) if (family == "cumulative" && link == "logit") { # use the more efficient 'ordered_logistic' built-in function str_add(out) <- glue( " /* ordered-logistic log-PDF for a single response and merged thresholds\n", " * Args:\n", " * y: response category\n", " * mu: latent mean parameter\n", " * thres: vector of merged ordinal thresholds\n", " * j: start and end index for the applid threshold within 'thres'\n", " * Returns:\n", " * a scalar to be added to the log posterior\n", " */\n", " real ordered_logistic_merged_lpmf(", "int y, real mu, vector thres, int[] j) {{\n", " return ordered_logistic_lpmf(y | mu, thres[j[1]:j[2]]);\n", " }}\n" ) } out } brms/R/numeric-helpers.R0000644000175000017500000000743714111751666015020 0ustar nileshnilesh# Most of the functions below have equivalents in Stan. Defining them in R is # necessary to evaluate non-linear formulas containing these functions. logit <- function(p) { log(p / (1 - p)) } inv_logit <- function(x) { 1 / (1 + exp(-x)) } cloglog <- function(x) { log(-log(1 - x)) } inv_cloglog <- function(x) { 1 - exp(-exp(x)) } Phi <- function(x) { pnorm(x) } # incomplete gamma funcion incgamma <- function(a, x) { pgamma(x, shape = a) * gamma(a) } square <- function(x) { x^2 } cbrt <- function(x) { x^(1/3) } exp2 <- function(x) { 2^x } pow <- function(x, y) { x^y } inv <- function(x) { 1/x } inv_sqrt <- function(x) { 1/sqrt(x) } inv_square <- function(x) { 1/x^2 } hypot <- function(x, y) { stopifnot(all(x >= 0)) stopifnot(all(y >= 0)) sqrt(x^2 + y^2) } log1m <- function(x) { log(1 - x) } step <- function(x) { ifelse(x > 0, 1, 0) } #' Logarithm with a minus one offset. #' #' Computes \code{log(x - 1)}. #' #' @param x A numeric or complex vector. #' @param base A positive or complex number: the base with respect to which #' logarithms are computed. Defaults to \emph{e} = \code{exp(1)}. #' #' @export logm1 <- function(x, base = exp(1)) { log(x - 1, base = base) } #' Exponential function plus one. #' #' Computes \code{exp(x) + 1}. #' #' @param x A numeric or complex vector. #' #' @export expp1 <- function(x) { exp(x) + 1 } #' Scaled logit-link #' #' Computes \code{logit((x - lb) / (ub - lb))} #' #' @param x A numeric or complex vector. #' @param lb Lower bound defaulting to \code{0}. #' @param ub Upper bound defaulting to \code{1}. #' #' @return A numeric or complex vector. #' #' @export logit_scaled <- function(x, lb = 0, ub = 1) { logit((x - lb) / (ub - lb)) } #' Scaled inverse logit-link #' #' Computes \code{inv_logit(x) * (ub - lb) + lb} #' #' @param x A numeric or complex vector. #' @param lb Lower bound defaulting to \code{0}. #' @param ub Upper bound defaulting to \code{1}. #' #' @return A numeric or complex vector between \code{lb} and \code{ub}. 
#' #' @export inv_logit_scaled <- function(x, lb = 0, ub = 1) { inv_logit(x) * (ub - lb) + lb } multiply_log <- function(x, y) { ifelse(x == y & x == 0, 0, x * log(y)) } log1p_exp <- function(x) { log(1 + exp(x)) } log1m_exp <- function(x) { ifelse(x < 0, log(1 - exp(x)), NaN) } log_diff_exp <- function(x, y) { stopifnot(length(x) == length(y)) ifelse(x > y, log(exp(x) - exp(y)), NaN) } log_sum_exp <- function(x, y) { max <- pmax(x, y) max + log(exp(x - max) + exp(y - max)) } log_mean_exp <- function(x) { max_x <- max(x) max_x + log(sum(exp(x - max_x))) - log(length(x)) } expm1 <- function(x) { exp(x) - 1 } log_expm1 <- function(x) { log(expm1(x)) } log_inv_logit <- function(x) { log(inv_logit(x)) } log1m_inv_logit <- function(x) { log(1 - inv_logit(x)) } scale_unit <- function(x, lb = min(x), ub = max(x)) { (x - lb) / (ub - lb) } fabs <- function(x) { abs(x) } softmax <- function(x) { ndim <- length(dim(x)) if (ndim <= 1) { x <- matrix(x, nrow = 1) ndim <- length(dim(x)) } x <- exp(x) dim_noncat <- dim(x)[-ndim] marg_noncat <- seq_along(dim(x))[-ndim] catsum <- array(apply(x, marg_noncat, sum), dim = dim_noncat) sweep(x, marg_noncat, catsum, "/") } log_softmax <- function(x) { ndim <- length(dim(x)) if (ndim <= 1) { x <- matrix(x, nrow = 1) ndim <- length(dim(x)) } dim_noncat <- dim(x)[-ndim] marg_noncat <- seq_along(dim(x))[-ndim] catsum <- log(array(apply(exp(x), marg_noncat, sum), dim = dim_noncat)) sweep(x, marg_noncat, catsum, "-") } inv_odds <- function(x) { x / (1 + x) } brms/R/family-lists.R0000644000175000017500000004032414111751666014323 0ustar nileshnilesh# This file contains a list for every native family. # These lists may contain the following elements: # links: possible link function (first is default) # dpars: distributional parameters of the family # type: either real or int (i.e. continuous or discrete) # ybounds: area of definition of the response values # closed: is the interval closed or open? 
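# --- Quick numerical check (not part of the package code) ---------------------
# The max-shifted trick used by log_sum_exp() / log_mean_exp() above avoids the
# overflow that the naive formula runs into:
x <- c(1000, 1001)
max(x) + log(sum(exp(x - max(x))))  # ~1001.313, finite
log(sum(exp(x)))                    # Inf, because exp(1000) overflows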
# ad: supported addition arguments # include: names of user-defined Stan functions # to be included in the Stan code # normalized: suffixes of Stan lpdfs or lpmfs which only exist as normalized # versions; can also be "" in which case the family is always normalized # specials: character vector specialties of some families .family_gaussian <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index"), normalized = c("_time_hom", "_time_het", "_lagsar", "_errorsar", "_fcor"), specials = c("residuals", "rescor") ) } .family_student <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "nu"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index"), include = "fun_logm1.stan", normalized = c("_time_hom", "_time_het", "_lagsar", "_errorsar", "_fcor"), specials = c("residuals", "rescor") ) } .family_skew_normal <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "alpha"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "se", "cens", "trunc", "mi", "index") ) } .family_binomial <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "trials", "cens", "trunc", "index"), specials = "sbi_logit" ) } .family_bernoulli <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu"), type = "int", ybounds = c(0, 1), closed = c(TRUE, TRUE), ad = c("weights", "subset", "index"), specials = c("binary", "sbi_logit") ) } .family_categorical <- function() { list( links = "logit", dpars = NULL, # is determined based on the data type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "index"), specials = c("categorical", "joint_link", "sbi_logit") ) } .family_multinomial <- function() { list( links = "logit", dpars = NULL, # is determined based on the data type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "trials", "index"), specials = c("multinomial", "joint_link"), include = "fun_multinomial_logit.stan", normalized = "" ) } .family_beta <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu", "phi"), type = "real", ybounds = c(0, 1), closed = c(FALSE, FALSE), ad = c("weights", "subset", "cens", "trunc", "mi", "index") ) } .family_dirichlet <- function() { list( links = "logit", dpars = "phi", # more dpars are determined based on the data type = "real", ybounds = c(0, 1), closed = c(FALSE, FALSE), ad = c("weights", "subset", "index"), specials = c("dirichlet", "joint_link"), include = "fun_dirichlet_logit.stan", normalized = "" ) } .family_dirichlet2 <- function() { list( links = c("log", "softplus", "squareplus", "identity", "logm1"), dpars = NULL, # is determind based on the data type = "real", ybounds = c(0, 1), closed = c(FALSE, FALSE), ad = c("weights", "subset", "index"), specials = c("dirichlet"), include = "fun_logm1.stan", normalized = "" ) } .family_poisson <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu"), type = 
"int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "rate", "index"), specials = "sbi_log" ) } .family_negbinomial <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "shape"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "rate", "index"), specials = "sbi_log" ) } # as negbinomial but with sigma = 1 / shape parameterization .family_negbinomial2 <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "sigma"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "rate", "index"), specials = "sbi_log" ) } .family_geometric <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "rate", "index"), specials = "sbi_log" ) } .family_discrete_weibull <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu", "shape"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_discrete_weibull.stan" ) } .family_com_poisson <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "shape"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_com_poisson.stan", specials = "sbi_log" ) } .family_gamma <- function() { list( links = c("log", "identity", "inverse", "softplus", "squareplus"), dpars = c("mu", "shape"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), specials = "transeta" # see stan_eta_ilink() ) } .family_weibull <- function() { list( links = c("log", "identity", "inverse", "softplus", "squareplus"), dpars = c("mu", "shape"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), specials = "transeta" # see stan_eta_ilink() ) } .family_exponential <- function() { list( links = c("log", "identity", "inverse", "softplus", "squareplus"), dpars = "mu", type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), specials = "transeta" # see stan_eta_ilink() ) } .family_frechet <- function() { list( links = c("log", "identity", "inverse", "softplus", "squareplus"), dpars = c("mu", "nu"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), include = "fun_logm1.stan", specials = "transeta" # see stan_eta_ilink() ) } .family_inverse.gaussian <- function() { list( links = c("1/mu^2", "inverse", "identity", "log", "softplus", "squareplus"), dpars = c("mu", "shape"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), include = "fun_inv_gaussian.stan" ) } .family_lognormal <- function() { list( links = c("identity", "inverse"), dpars = c("mu", "sigma"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), specials = "logscale" ) } .family_shifted_lognormal <- function() { list( links = c("identity", "inverse"), dpars = c("mu", "sigma", "ndt"), type = "real", ybounds = c(0, Inf), 
closed = c(FALSE, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), specials = "logscale" ) } .family_exgaussian <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "beta"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index") ) } .family_wiener <- function() { list( links = c("identity", "log", "softplus", "squareplus"), dpars = c("mu", "bs", "ndt", "bias"), type = "real", ybounds = c(0, Inf), closed = c(FALSE, NA), ad = c("weights", "subset", "dec", "index"), include = "fun_wiener_diffusion.stan", normalized = "" ) } .family_gen_extreme_value <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "xi"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), include = c("fun_gen_extreme_value.stan", "fun_scale_xi.stan"), normalized = "" ) } .family_von_mises <- function() { list( links = c("tan_half", "identity"), dpars = c("mu", "kappa"), type = "real", ybounds = c(-pi, pi), closed = c(TRUE, TRUE), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), include = c("fun_tan_half.stan", "fun_von_mises.stan"), normalized = "" ) } .family_asym_laplace <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "quantile"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "cens", "trunc", "mi", "index"), include = "fun_asym_laplace.stan", normalized = "" ) } .family_zero_inflated_asym_laplace <- function() { list( links = c("identity", "log", "inverse", "softplus", "squareplus"), dpars = c("mu", "sigma", "quantile", "zi"), type = "real", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = c("fun_asym_laplace.stan", "fun_zero_inflated_asym_laplace.stan") ) } .family_cox <- function() { list( links = c("log", "identity", "softplus", "squareplus"), dpars = c("mu"), type = "real", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_cox.stan", specials = c("cox", "sbi_log", "sbi_log_cdf"), normalized = "" ) } .family_cumulative <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit" ), dpars = c("mu", "disc"), type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "thres", "cat", "index"), specials = c( "ordinal", "ordered_thres", "thres_minus_eta", "joint_link", "ocs", "sbi_logit" ), normalized = "" ) } .family_sratio <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit" ), dpars = c("mu", "disc"), type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "thres", "cat", "index"), specials = c("ordinal", "cs", "thres_minus_eta", "joint_link"), normalized = "" ) } .family_cratio <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit" ), dpars = c("mu", "disc"), type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", "subset", "thres", "cat", "index"), specials = c("ordinal", "cs", "eta_minus_thres", "joint_link"), normalized = "" ) } .family_acat <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit" ), dpars = c("mu", "disc"), type = "int", ybounds = c(-Inf, Inf), closed = c(NA, NA), ad = c("weights", 
"subset", "thres", "cat", "index"), specials = c("ordinal", "cs", "eta_minus_thres", "joint_link"), normalized = "" ) } .family_hurdle_poisson <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "hu"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_hurdle_poisson.stan", specials = c("sbi_log", "sbi_hu_logit"), normalized = "" ) } .family_hurdle_negbinomial <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "shape", "hu"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_hurdle_negbinomial.stan", specials = c("sbi_log", "sbi_hu_logit"), normalized = "" ) } .family_hurdle_gamma <- function() { list( links = c("log", "identity", "inverse", "softplus", "squareplus"), dpars = c("mu", "shape", "hu"), type = "real", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_hurdle_gamma.stan", specials = "sbi_hu_logit", normalized = "" ) } .family_hurdle_lognormal <- function() { list( links = c("identity", "inverse"), dpars = c("mu", "sigma", "hu"), type = "real", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_hurdle_lognormal.stan", specials = c("logscale", "sbi_hu_logit"), normalized = "" ) } .family_zero_inflated_poisson <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "zi"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_zero_inflated_poisson.stan", specials = c("sbi_log", "sbi_zi_logit"), normalized = "" ) } .family_zero_inflated_negbinomial <- function() { list( links = c("log", "identity", "sqrt", "softplus", "squareplus"), dpars = c("mu", "shape", "zi"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_zero_inflated_negbinomial.stan", specials = c("sbi_log", "sbi_zi_logit"), normalized = "" ) } .family_zero_inflated_binomial <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu", "zi"), type = "int", ybounds = c(0, Inf), closed = c(TRUE, NA), ad = c("weights", "subset", "trials", "cens", "trunc", "index"), include = "fun_zero_inflated_binomial.stan", specials = c("sbi_logit", "sbi_zi_logit"), normalized = "" ) } .family_zero_inflated_beta <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu", "phi", "zi"), type = "real", ybounds = c(0, 1), closed = c(TRUE, FALSE), ad = c("weights", "subset", "cens", "trunc", "index"), include = "fun_zero_inflated_beta.stan", specials = "sbi_zi_logit", normalized = "" ) } .family_zero_one_inflated_beta <- function() { list( links = c( "logit", "probit", "probit_approx", "cloglog", "cauchit", "identity" ), dpars = c("mu", "phi", "zoi", "coi"), type = "real", ybounds = c(0, 1), closed = c(TRUE, TRUE), ad = c("weights", "subset", "index"), include = "fun_zero_one_inflated_beta.stan", specials = "sbi_zi_logit", normalized = "" ) } .family_custom <- function() { list( ad = c("weights", "subset", "se", "cens", "trunc", "trials", "thres", "cat", "dec", "mi", "index", "vreal", "vint"), ybounds = c(-Inf, Inf), closed = c(NA, NA) ) } 
brms/R/diagnostics.R0000644000175000017500000000562214111751666014217 0ustar nileshnilesh#' Extract Diagnostic Quantities of \pkg{brms} Models #' #' Extract quantities that can be used to diagnose sampling behavior #' of the algorithms applied by \pkg{Stan} at the back-end of \pkg{brms}. #' #' @name diagnostic-quantities #' @aliases log_posterior nuts_params rhat neff_ratio #' #' @param object A \code{brmsfit} object. #' @param pars An optional character vector of parameter names. #' For \code{nuts_params} these will be NUTS sampler parameter #' names rather than model parameters. If pars is omitted #' all parameters are included. #' @param ... Arguments passed to individual methods. #' #' @return The exact form of the output depends on the method. #' #' @details For more details see #' \code{\link[bayesplot:bayesplot-extractors]{bayesplot-extractors}}. #' #' @examples #' \dontrun{ #' fit <- brm(time ~ age * sex, data = kidney) #' #' lp <- log_posterior(fit) #' head(lp) #' #' np <- nuts_params(fit) #' str(np) #' # extract the number of divergence transitions #' sum(subset(np, Parameter == "divergent__")$Value) #' #' head(rhat(fit)) #' head(neff_ratio(fit)) #' } NULL #' @rdname diagnostic-quantities #' @importFrom bayesplot log_posterior #' @export log_posterior #' @export log_posterior.brmsfit <- function(object, ...) { contains_draws(object) bayesplot::log_posterior(object$fit, ...) } #' @rdname diagnostic-quantities #' @importFrom bayesplot nuts_params #' @export nuts_params #' @export nuts_params.brmsfit <- function(object, pars = NULL, ...) { contains_draws(object) bayesplot::nuts_params(object$fit, pars = pars, ...) } #' @rdname diagnostic-quantities #' @importFrom bayesplot rhat #' @export rhat #' @export rhat.brmsfit <- function(object, pars = NULL, ...) { contains_draws(object) bayesplot::rhat(object$fit, pars = pars, ...) } #' @rdname diagnostic-quantities #' @importFrom bayesplot neff_ratio #' @export neff_ratio #' @export neff_ratio.brmsfit <- function(object, pars = NULL, ...) { contains_draws(object) bayesplot::neff_ratio(object$fit, pars = pars, ...) } #' Extract Control Parameters of the NUTS Sampler #' #' Extract control parameters of the NUTS sampler such as #' \code{adapt_delta} or \code{max_treedepth}. #' #' @param x An \R object #' @param pars Optional names of the control parameters to be returned. #' If \code{NULL} (the default) all control parameters are returned. #' See \code{\link[rstan:stan]{stan}} for more details. #' @param ... Currently ignored. #' #' @return A named \code{list} with control parameter values. #' #' @export control_params <- function(x, ...) { UseMethod("control_params") } #' @rdname control_params #' @export control_params.brmsfit <- function(x, pars = NULL, ...) { contains_draws(x) out <- attr(x$fit@sim$samples[[1]], "args")$control if (!is.null(pars)) { out <- out[pars] } out } brms/R/loo_subsample.R0000644000175000017500000000512014111751666014545 0ustar nileshnilesh#' Efficient approximate leave-one-out cross-validation (LOO) using subsampling #' #' @aliases loo_subsample #' #' @inheritParams loo.brmsfit #' #' @details More details can be found on #' \code{\link[loo:loo_subsample]{loo_subsample}}. 
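# --- Usage sketch (not run; requires a fitted model) --------------------------
# The diagnostic extractors documented above operate on a brmsfit object, e.g.:
# fit <- brm(count ~ zAge + zBase * Trt, data = epilepsy, family = poisson())
# np  <- nuts_params(fit)
# sum(subset(np, Parameter == "divergent__")$Value)  # number of divergences
# control_params(fit, pars = "adapt_delta")          # one sampler control value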
#' #' @examples #' \dontrun{ #' # model with population-level effects only #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler) #' (loo1 <- loo_subsample(fit1)) #' #' # model with an additional varying intercept for subjects #' fit2 <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler) #' (loo2 <- loo_subsample(fit2)) #' #' # compare both models #' loo_compare(loo1, loo2) #' } #' #' @importFrom loo loo_subsample #' @export loo_subsample #' @export loo_subsample.brmsfit <- function(x, ..., compare = TRUE, resp = NULL, model_names = NULL) { args <- split_dots(x, ..., model_names = model_names) c(args) <- nlist( criterion = "loo_subsample", compare, resp, add_point_estimate = TRUE ) do_call(compute_loolist, args) } # compute 'loo_subsample' criterion using the 'loo' package # @param model_name ignored but included to avoid being passed to '...' .loo_subsample <- function(x, newdata, resp, model_name, ...) { loo_args <- prepare_loo_args( x, newdata = newdata, resp = resp, pointwise = TRUE, ... ) do_call("loo_subsample", loo_args, pkg = "loo") } # methods required in loo_subsample #' @importFrom loo .ndraws #' @export .ndraws.brmsprep <- function(x) { x$ndraws } #' @export .ndraws.mvbrmsprep <- function(x) { x$ndraws } #' @importFrom loo .thin_draws #' @export .thin_draws.brmsprep <- function(draws, loo_approximation_draws) { # brmsprep objects are too complex to implement a post-hoc subsetting method if (length(loo_approximation_draws)) { stop2("'loo_approximation_draws' is not supported for brmsfit objects.") } draws } #' @export .thin_draws.mvbrmsprep <- function(draws, loo_approximation_draws) { if (length(loo_approximation_draws)) { stop2("'loo_approximation_draws' is not supported for brmsfit objects.") } draws } #' @importFrom loo .compute_point_estimate #' @export .compute_point_estimate.brmsprep <- function(draws) { # point estimates are stored in the form of an attribute rather # than computed on the fly due to the complexity of brmsprep objects attr(draws, "point_estimate") } #' @export .compute_point_estimate.mvbrmsprep <- function(draws) { attr(draws, "point_estimate") } brms/R/bayes_R2.R0000644000175000017500000000661114111751665013354 0ustar nileshnilesh#' Compute a Bayesian version of R-squared for regression models #' #' @aliases bayes_R2 #' #' @inheritParams predict.brmsfit #' @param ... Further arguments passed to #' \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}}, #' which is used in the computation of the R-squared values. #' #' @return If \code{summary = TRUE}, an M x C matrix is returned #' (M = number of response variables and c = \code{length(probs) + 2}) #' containing summary statistics of the Bayesian R-squared values. #' If \code{summary = FALSE}, the posterior draws of the Bayesian #' R-squared values are returned in an S x M matrix (S is the number of draws). #' #' @details For an introduction to the approach, see Gelman et al. (2018) #' and \url{https://github.com/jgabry/bayes_R2/}. #' #' @references Andrew Gelman, Ben Goodrich, Jonah Gabry & Aki Vehtari. (2018). #' R-squared for Bayesian regression models, \emph{The American Statistician}. 
#' \code{10.1080/00031305.2018.1549100} (Preprint available at #' \url{https://stat.columbia.edu/~gelman/research/published/bayes_R2_v3.pdf}) #' #' @examples #' \dontrun{ #' fit <- brm(mpg ~ wt + cyl, data = mtcars) #' summary(fit) #' bayes_R2(fit) #' #' # compute R2 with new data #' nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) #' bayes_R2(fit, newdata = nd) #' } #' #' @method bayes_R2 brmsfit #' @importFrom rstantools bayes_R2 #' @export bayes_R2 #' @export bayes_R2.brmsfit <- function(object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { contains_draws(object) object <- restructure(object) resp <- validate_resp(resp, object) summary <- as_one_logical(summary) # check for precomputed values R2 <- get_criterion(object, "bayes_R2") if (is.matrix(R2)) { # assumes unsummarized 'R2' as ensured by 'add_criterion' take <- colnames(R2) %in% paste0("R2", resp) R2 <- R2[, take, drop = FALSE] if (summary) { R2 <- posterior_summary(R2, probs = probs, robust = robust) } return(R2) } family <- family(object, resp = resp) if (conv_cats_dpars(family)) { stop2("'bayes_R2' is not defined for unordered categorical models.") } if (is_ordinal(family)) { warning2( "Predictions are treated as continuous variables in ", "'bayes_R2' which is likely invalid for ordinal families." ) } args_y <- list(object, warn = TRUE, ...) args_ypred <- list(object, sort = TRUE, ...) R2 <- named_list(paste0("R2", resp)) for (i in seq_along(R2)) { # assumes expectations of different responses to be independent args_ypred$resp <- args_y$resp <- resp[i] y <- do_call(get_y, args_y) ypred <- do_call(posterior_epred, args_ypred) if (is_ordinal(family(object, resp = resp[i]))) { ypred <- ordinal_probs_continuous(ypred) } R2[[i]] <- .bayes_R2(y, ypred) } R2 <- do_call(cbind, R2) colnames(R2) <- paste0("R2", resp) if (summary) { R2 <- posterior_summary(R2, probs = probs, robust = robust) } R2 } # internal function of bayes_R2.brmsfit # see https://github.com/jgabry/bayes_R2/blob/master/bayes_R2.pdf .bayes_R2 <- function(y, ypred, ...) { e <- -1 * sweep(ypred, 2, y) var_ypred <- matrixStats::rowVars(ypred) var_e <- matrixStats::rowVars(e) as.matrix(var_ypred / (var_ypred + var_e)) } brms/R/projpred.R0000644000175000017500000001645414141006064013526 0ustar nileshnilesh#' Projection Predictive Variable Selection: Get Reference Model #' #' The \code{get_refmodel.brmsfit} method can be used to create the reference #' model structure which is needed by the \pkg{projpred} package for performing #' a projection predictive variable selection. This method is called #' automatically when performing variable selection via #' \code{\link[projpred:varsel]{varsel}} or #' \code{\link[projpred:cv_varsel]{cv_varsel}}, so you will rarely need to call #' it manually yourself. #' #' @inheritParams posterior_predict.brmsfit #' @param cvfun Optional cross-validation function #' (see \code{\link[projpred:get-refmodel]{get_refmodel}} for details). #' If \code{NULL} (the default), \code{cvfun} is defined internally #' based on \code{\link{kfold.brmsfit}}. #' @param ... Further arguments passed to #' \code{\link[projpred:get-refmodel]{init_refmodel}}. #' #' @details Note that the \code{extract_model_data} function used internally by #' \code{get_refmodel.brmsfit} ignores arguments \code{wrhs}, \code{orhs}, and #' \code{extract_y}. This is relevant for #' \code{\link[projpred:predict.refmodel]{predict.refmodel}}, for example. 
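# --- Standalone numeric sketch (not part of the package code) -----------------
# The same computation as .bayes_R2() above, on simulated draws: per-draw
# predictive variance divided by predictive plus residual variance
# (matrixStats is used by the package itself; the data below are made up).
set.seed(1)
y <- rnorm(50)
ypred <- matrix(rnorm(200 * 50, mean = rep(y, each = 200), sd = 0.5), 200, 50)
e <- -1 * sweep(ypred, 2, y)                  # residual draws, as in .bayes_R2
var_ypred <- matrixStats::rowVars(ypred)      # per-draw predictive variance
var_e <- matrixStats::rowVars(e)              # per-draw residual variance
summary(var_ypred / (var_ypred + var_e))      # draws of the Bayesian R-squared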
#' #' @return A \code{refmodel} object to be used in conjunction with the #' \pkg{projpred} package. #' #' @examples #' \dontrun{ #' # fit a simple model #' fit <- brm(count ~ zAge + zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit) #' #' # The following code requires the 'projpred' package to be installed: #' library(projpred) #' #' # perform variable selection without cross-validation #' vs <- varsel(fit) #' summary(vs) #' plot(vs) #' #' # perform variable selection with cross-validation #' cv_vs <- cv_varsel(fit) #' summary(cv_vs) #' plot(cv_vs) #' } get_refmodel.brmsfit <- function(object, newdata = NULL, resp = NULL, cvfun = NULL, ...) { require_package("projpred") dots <- list(...) resp <- validate_resp(resp, object, multiple = FALSE) formula <- formula(object) if (!is.null(resp)) { formula <- formula$forms[[resp]] } # prepare the family object for use in projpred family <- family(object, resp = resp) if (family$family == "bernoulli") { family$family <- "binomial" } else if (family$family == "gamma") { family$family <- "Gamma" } # For the augmented-data approach, do not re-define ordinal or categorical # families to preserve their family-specific extra arguments ("extra" meaning # "additionally to `link`") like `refcat` and `thresholds` (see ?brmsfamily): if (!(isTRUE(dots$aug_data) && is_polytomous(family))) { family <- get(family$family, mode = "function")(link = family$link) } else { # TODO: uncomment the lines below as soon as the # `extend_family_` exist (in brms): # family <- get(paste0("extend_family_", family$family, mode = "function"))( # family # ) } # check if the model is supported by projpred bterms <- brmsterms(formula) if (length(bterms$dpars) > 1L && !conv_cats_dpars(family$family)) { stop2("Projpred does not support distributional models.") } if (length(bterms$nlpars) > 0L) { stop2("Projpred does not support non-linear models.") } not_ok_term_types <- setdiff(all_term_types(), c("fe", "re", "offset", "sm")) if (any(not_ok_term_types %in% names(bterms$dpars$mu))) { stop2("Projpred only supports standard multilevel terms and offsets.") } # only use the raw formula for selection of terms formula <- formula$formula # LHS should only contain the response variable formula[[2]] <- bterms$respform[[2]] # projpred requires the dispersion parameter if present dis <- NULL if (family$family == "gaussian") { dis <- paste0("sigma", usc(resp)) dis <- as.data.frame(object, variable = dis)[[dis]] } else if (family$family == "Gamma") { dis <- paste0("shape", usc(resp)) dis <- as.data.frame(object, variable = dis)[[dis]] } # prepare data passed to projpred data <- current_data(object, newdata, resp = resp, check_response = TRUE) attr(data, "terms") <- NULL # allows to handle additional arguments implicitly extract_model_data <- function(object, newdata = NULL, ...) { .extract_model_data(object, newdata = newdata, resp = resp, ...) 
} # Using the default prediction function from projpred is usually fine ref_predfun <- NULL if (isTRUE(dots$aug_data) && is_ordinal(family$family)) { stop2("This case is not yet supported.") # Use argument `incl_thres` of posterior_linpred() (and convert the # 3-dimensional array to an "augmented-rows" matrix) # TODO: uncomment the lines below as soon as arr2augmat() is exported # ref_predfun <- function(fit, newdata = NULL) { # # Note: `transform = FALSE` is not needed, but included here for # # consistency with projpred's default ref_predfun(): # linpred_out <- posterior_linpred( # fit, transform = FALSE, newdata = newdata, incl_thres = TRUE # ) # stopifnot(length(dim(linpred_out)) == 3L) # # Since posterior_linpred() is supposed to include the offsets in its # # result, subtract them here: # # Observation weights are not needed here, so use `wrhs = NULL` to avoid # # potential conflicts for a non-`NULL` default `wrhs`: # offs <- extract_model_data(fit, newdata = newdata, wrhs = NULL)$offset # if (length(offs)) { # stopifnot(length(offs) %in% c(1L, dim(linpred_out)[2])) # linpred_out <- sweep(linpred_out, 2, offs) # } # linpred_out <- projpred:::arr2augmat(linpred_out, margin_draws = 1) # return(linpred_out) # } } # extract a list of K-fold sub-models if (is.null(cvfun)) { cvfun <- function(folds, ...) { cvres <- kfold( object, K = max(folds), save_fits = TRUE, folds = folds, ... ) fits <- cvres$fits[, "fit"] return(fits) } } else { if (!is.function(cvfun)) { stop2("'cvfun' should be a function.") } } args <- nlist( object, data, formula, family, dis, ref_predfun = ref_predfun, cvfun = cvfun, extract_model_data = extract_model_data, ... ) do_call(projpred::init_refmodel, args) } # auxiliary data required in predictions via projpred # @return a named list with slots 'weights' and 'offset' .extract_model_data <- function(object, newdata = NULL, resp = NULL, ...) { stopifnot(is.brmsfit(object)) resp <- validate_resp(resp, object, multiple = FALSE) family <- family(object, resp = resp) # call standata to ensure the correct format of the data args <- nlist( object, newdata, resp, check_response = TRUE, internal = TRUE ) sdata <- do_call(standata, args) # extract relevant auxiliary data usc_resp <- usc(resp) y <- as.vector(sdata[[paste0("Y", usc_resp)]]) offset <- as.vector(sdata[[paste0("offsets", usc_resp)]]) weights <- as.vector(sdata[[paste0("weights", usc_resp)]]) trials <- as.vector(sdata[[paste0("trials", usc_resp)]]) stopifnot(!is.null(y)) if (is_binary(family)) { trials <- rep(1, length(y)) } if (!is.null(trials)) { if (!is.null(weights)) { stop2("Projpred cannot handle 'trials' and 'weights' at the same time.") } weights <- trials } if (is.null(weights)) { weights <- rep(1, length(y)) } if (is.null(offset)) { offset <- rep(0, length(y)) } nlist(y, weights, offset) } brms/R/brmsfit-class.R0000644000175000017500000000764514111751665014467 0ustar nileshnilesh#' Class \code{brmsfit} of models fitted with the \pkg{brms} package #' #' Models fitted with the \code{\link[brms:brms-package]{brms}} package are #' represented as a \code{brmsfit} object, which contains the posterior #' draws (samples), model formula, Stan code, relevant data, and other information. #' #' @name brmsfit-class #' @aliases brmsfit #' @docType class #' #' @details #' See \code{methods(class = "brmsfit")} for an overview of available methods. #' #' @slot formula A \code{\link{brmsformula}} object. #' @slot data A \code{data.frame} containing all variables used in the model. 
#' @slot data2 A \code{list} of data objects which cannot be passed #' via \code{data}. #' @slot prior A \code{\link{brmsprior}} object containing #' information on the priors used in the model. #' @slot stanvars A \code{\link{stanvars}} object. #' @slot model The model code in \pkg{Stan} language. #' @slot ranef A \code{data.frame} containing the group-level structure. #' @slot exclude The names of the parameters for which draws are not saved. #' @slot algorithm The name of the algorithm used to fit the model. #' @slot backend The name of the backend used to fit the model. #' @slot threads An object of class `brmsthreads` created by #' \code{\link{threading}}. #' @slot opencl An object of class `brmsopencl` created by \code{\link{opencl}}. #' @slot fit An object of class \code{\link[rstan:stanfit-class]{stanfit}} #' among others containing the posterior draws. #' @slot criteria An empty \code{list} for adding model fit criteria #' after estimation of the model. #' @slot file Optional name of a file in which the model object was stored in #' or loaded from. #' @slot version The versions of \pkg{brms} and \pkg{rstan} with #' which the model was fitted. #' @slot family (Deprecated) A \code{\link{brmsfamily}} object. #' @slot autocor (Deprecated) An \code{\link{cor_brms}} object containing #' the autocorrelation structure if specified. #' @slot cov_ranef (Deprecated) A \code{list} of customized group-level #' covariance matrices. #' @slot stan_funs (Deprecated) A character string of length one or \code{NULL}. #' @slot data.name (Deprecated) The name of \code{data} as specified by the user. #' #' @seealso #' \code{\link{brms}}, #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{brmsfamily}} #' NULL # brmsfit class brmsfit <- function(formula = NULL, data = data.frame(), prior = empty_prior(), data2 = list(), stanvars = NULL, model = "", ranef = empty_ranef(), save_pars = NULL, algorithm = "sampling", backend = "rstan", threads = threading(), opencl = opencl(), fit = NULL, criteria = list(), file = NULL, family = NULL, autocor = NULL, cov_ranef = NULL, stan_funs = NULL, data.name = "") { version <- list( brms = utils::packageVersion("brms"), rstan = utils::packageVersion("rstan"), stanHeaders = utils::packageVersion("StanHeaders") ) if (backend == "cmdstanr") { require_package("cmdstanr") version$cmdstanr <- utils::packageVersion("cmdstanr") version$cmdstan <- as.package_version(cmdstanr::cmdstan_version()) } x <- nlist( formula, data, prior, data2, stanvars, model, ranef, save_pars, algorithm, backend, threads, opencl, fit, criteria, file, version, family, autocor, cov_ranef, stan_funs, data.name ) class(x) <- "brmsfit" x } #' Checks if argument is a \code{brmsfit} object #' #' @param x An \R object #' #' @export is.brmsfit <- function(x) { inherits(x, "brmsfit") } #' Checks if argument is a \code{brmsfit_multiple} object #' #' @param x An \R object #' #' @export is.brmsfit_multiple <- function(x) { inherits(x, "brmsfit_multiple") } is.stanfit <- function(x) { inherits(x, "stanfit") } brms/R/model_weights.R0000644000175000017500000003013314111751666014535 0ustar nileshnilesh#' Model Weighting Methods #' #' Compute model weights in various ways, for instance, via #' stacking of posterior predictive distributions, Akaike weights, #' or marginal likelihoods. #' #' @inheritParams loo.brmsfit #' @param weights Name of the criterion to compute weights from. 
Should be one #' of \code{"loo"}, \code{"waic"}, \code{"kfold"}, \code{"stacking"} (current #' default), or \code{"bma"}, \code{"pseudobma"}, For the former three #' options, Akaike weights will be computed based on the information criterion #' values returned by the respective methods. For \code{"stacking"} and #' \code{"pseudobma"}, method \code{\link{loo_model_weights}} will be used to #' obtain weights. For \code{"bma"}, method \code{\link{post_prob}} will be #' used to compute Bayesian model averaging weights based on log marginal #' likelihood values (make sure to specify reasonable priors in this case). #' For some methods, \code{weights} may also be a numeric vector of #' pre-specified weights. #' #' @return A numeric vector of weights for the models. #' #' @examples #' \dontrun{ #' # model with 'treat' as predictor #' fit1 <- brm(rating ~ treat + period + carry, data = inhaler) #' summary(fit1) #' #' # model without 'treat' as predictor #' fit2 <- brm(rating ~ period + carry, data = inhaler) #' summary(fit2) #' #' # obtain Akaike weights based on the WAIC #' model_weights(fit1, fit2, weights = "waic") #' } #' #' @export model_weights.brmsfit <- function(x, ..., weights = "stacking", model_names = NULL) { weights <- validate_weights_method(weights) args <- split_dots(x, ..., model_names = model_names) models <- args$models args$models <- NULL model_names <- names(models) if (weights %in% c("loo", "waic", "kfold")) { # Akaike weights based on information criteria ics <- rep(NA, length(models)) for (i in seq_along(ics)) { args$x <- models[[i]] args$model_names <- names(models)[i] ics[i] <- SW(do_call(weights, args))$estimates[3, 1] } ic_diffs <- ics - min(ics) out <- exp(-ic_diffs / 2) } else if (weights %in% c("stacking", "pseudobma")) { args <- c(unname(models), args) args$method <- weights out <- do_call("loo_model_weights", args) } else if (weights %in% "bma") { args <- c(unname(models), args) out <- do_call("post_prob", args) } out <- as.numeric(out) out <- out / sum(out) names(out) <- model_names out } #' @rdname model_weights.brmsfit #' @export model_weights <- function(x, ...) { UseMethod("model_weights") } # validate name of the applied weighting method validate_weights_method <- function(method) { method <- as_one_character(method) method <- tolower(method) if (method == "loo2") { warning2("Weight method 'loo2' is deprecated. Use 'stacking' instead.") method <- "stacking" } if (method == "marglik") { warning2("Weight method 'marglik' is deprecated. Use 'bma' instead.") method <- "bma" } options <- c("loo", "waic", "kfold", "stacking", "pseudobma", "bma") match.arg(method, options) } #' Posterior predictive draws averaged across models #' #' Compute posterior predictive draws averaged across models. #' Weighting can be done in various ways, for instance using #' Akaike weights based on information criteria or #' marginal likelihoods. #' #' @inheritParams model_weights.brmsfit #' @param method Method used to obtain predictions to average over. Should be #' one of \code{"posterior_predict"} (default), \code{"posterior_epred"}, #' \code{"posterior_linpred"} or \code{"predictive_error"}. #' @param control Optional \code{list} of further arguments #' passed to the function specified in \code{weights}. #' @param ndraws Total number of posterior draws to use. #' @param nsamples Deprecated alias of \code{ndraws}. #' @param seed A single numeric value passed to \code{\link{set.seed}} #' to make results reproducible. #' @param summary Should summary statistics #' (i.e. 
means, sds, and 95\% intervals) be returned #' instead of the raw values? Default is \code{TRUE}. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return Same as the output of the method specified #' in argument \code{method}. #' #' @details Weights are computed with the \code{\link{model_weights}} method. #' #' @seealso \code{\link{model_weights}}, \code{\link{posterior_average}} #' #' @examples #' \dontrun{ #' # model with 'treat' as predictor #' fit1 <- brm(rating ~ treat + period + carry, data = inhaler) #' summary(fit1) #' #' # model without 'treat' as predictor #' fit2 <- brm(rating ~ period + carry, data = inhaler) #' summary(fit2) #' #' # compute model-averaged predicted values #' (df <- unique(inhaler[, c("treat", "period", "carry")])) #' pp_average(fit1, fit2, newdata = df) #' #' # compute model-averaged fitted values #' pp_average(fit1, fit2, method = "fitted", newdata = df) #' } #' #' @export pp_average.brmsfit <- function( x, ..., weights = "stacking", method = "posterior_predict", ndraws = NULL, nsamples = NULL, summary = TRUE, probs = c(0.025, 0.975), robust = FALSE, model_names = NULL, control = list(), seed = NULL ) { if (!is.null(seed)) { set.seed(seed) } method <- validate_pp_method(method) ndraws <- use_alias(ndraws, nsamples) if (any(c("draw_ids", "subset") %in% names(list(...)))) { stop2("Cannot use argument 'draw_ids' in pp_average.") } args <- split_dots(x, ..., model_names = model_names) args$summary <- FALSE models <- args$models args$models <- NULL if (!match_response(models)) { stop2("Can only average models predicting the same response.") } if (is.null(ndraws)) { ndraws <- ndraws(models[[1]]) } ndraws <- as_one_integer(ndraws) weights <- validate_weights(weights, models, control) ndraws <- round_largest_remainder(weights * ndraws) names(weights) <- names(ndraws) <- names(models) out <- named_list(names(models)) for (i in seq_along(out)) { if (ndraws[i] > 0) { args$object <- models[[i]] args$ndraws <- ndraws[i] out[[i]] <- do_call(method, args) } } out <- do_call(rbind, out) if (summary) { out <- posterior_summary(out, probs = probs, robust = robust) } attr(out, "weights") <- weights attr(out, "ndraws") <- ndraws out } #' @rdname pp_average.brmsfit #' @export pp_average <- function(x, ...) { UseMethod("pp_average") } # validate weights passed to model averaging functions # see pp_average.brmsfit for more documentation validate_weights <- function(weights, models, control = list()) { if (!is.numeric(weights)) { weight_args <- c(unname(models), control) weight_args$weights <- weights weights <- do_call(model_weights, weight_args) } else { if (length(weights) != length(models)) { stop2("If numeric, 'weights' must have the same length ", "as the number of models.") } if (any(weights < 0)) { stop2("If numeric, 'weights' must be positive.") } } weights / sum(weights) } #' Posterior draws of parameters averaged across models #' #' Extract posterior draws of parameters averaged across models. #' Weighting can be done in various ways, for instance using #' Akaike weights based on information criteria or #' marginal likelihoods. 
#' #' @inheritParams pp_average.brmsfit #' @param variable Names of variables (parameters) for which to average across #' models. Only those variables can be averaged that appear in every model. #' Defaults to all overlapping variables. #' @param pars Deprecated alias of \code{variable}. #' @param missing An optional numeric value or a named list of numeric values #' to use if a model does not contain a variable for which posterior draws #' should be averaged. Defaults to \code{NULL}, in which case only those #' variables can be averaged that are present in all of the models. #' #' @return A \code{data.frame} of posterior draws. #' #' @details Weights are computed with the \code{\link{model_weights}} method. #' #' @seealso \code{\link{model_weights}}, \code{\link{pp_average}} #' #' @examples #' \dontrun{ #' # model with 'treat' as predictor #' fit1 <- brm(rating ~ treat + period + carry, data = inhaler) #' summary(fit1) #' #' # model without 'treat' as predictor #' fit2 <- brm(rating ~ period + carry, data = inhaler) #' summary(fit2) #' #' # compute model-averaged posteriors of overlapping parameters #' posterior_average(fit1, fit2, weights = "waic") #' } #' #' @export posterior_average.brmsfit <- function( x, ..., variable = NULL, pars = NULL, weights = "stacking", ndraws = NULL, nsamples = NULL, missing = NULL, model_names = NULL, control = list(), seed = NULL ) { if (!is.null(seed)) { set.seed(seed) } variable <- use_alias(variable, pars) ndraws <- use_alias(ndraws, nsamples) models <- split_dots(x, ..., model_names = model_names, other = FALSE) vars_list <- lapply(models, variables) all_vars <- unique(unlist(vars_list)) if (is.null(missing)) { common_vars <- lapply(vars_list, function(x) all_vars %in% x) common_vars <- all_vars[Reduce("&", common_vars)] if (is.null(variable)) { variable <- setdiff(common_vars, "lp__") } variable <- as.character(variable) inv_vars <- setdiff(variable, common_vars) if (length(inv_vars)) { inv_vars <- collapse_comma(inv_vars) stop2( "Parameters ", inv_vars, " cannot be found in all ", "of the models. Consider using argument 'missing'." 
) } } else { if (is.null(variable)) { variable <- setdiff(all_vars, "lp__") } variable <- as.character(variable) inv_vars <- setdiff(variable, all_vars) if (length(inv_vars)) { inv_vars <- collapse_comma(inv_vars) stop2("Parameters ", inv_vars, " cannot be found in any of the models.") } if (is.list(missing)) { all_miss_vars <- unique(ulapply( models, function(m) setdiff(variable, variables(m)) )) inv_vars <- setdiff(all_miss_vars, names(missing)) if (length(inv_vars)) { stop2("Argument 'missing' has no value for parameters ", collapse_comma(inv_vars), ".") } missing <- lapply(missing, as_one_numeric, allow_na = TRUE) } else { missing <- as_one_numeric(missing, allow_na = TRUE) missing <- named_list(variable, missing) } } if (is.null(ndraws)) { ndraws <- ndraws(models[[1]]) } ndraws <- as_one_integer(ndraws) weights <- validate_weights(weights, models, control) ndraws <- round_largest_remainder(weights * ndraws) names(weights) <- names(ndraws) <- names(models) out <- named_list(names(models)) for (i in seq_along(out)) { if (ndraws[i] > 0) { draw <- sample(seq_len(ndraws(models[[i]])), ndraws[i]) draw <- sort(draw) found_vars <- intersect(variable, variables(models[[i]])) if (length(found_vars)) { out[[i]] <- as.data.frame( models[[i]], variable = found_vars, draw = draw ) } else { out[[i]] <- as.data.frame(matrix( numeric(0), nrow = ndraws[i], ncol = 0 )) } if (!is.null(missing)) { miss_vars <- setdiff(variable, names(out[[i]])) if (length(miss_vars)) { out[[i]][miss_vars] <- missing[miss_vars] } } } } out <- do_call(rbind, out) rownames(out) <- NULL attr(out, "weights") <- weights attr(out, "ndraws") <- ndraws out } #' @rdname posterior_average.brmsfit #' @export posterior_average <- function(x, ...) { UseMethod("posterior_average") } brms/R/predictive_error.R0000644000175000017500000001415414111751666015257 0ustar nileshnilesh#' Posterior Draws of Predictive Errors #' #' Compute posterior draws of predictive errors, that is, observed minus #' predicted responses. Can be performed for the data used to fit the model #' (posterior predictive checks) or for new data. #' #' @inheritParams posterior_predict.brmsfit #' #' @return An S x N \code{array} of predictive error draws, where S is the #' number of posterior draws and N is the number of observations. #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, cores = 2) #' #' ## extract predictive errors #' pe <- predictive_error(fit) #' str(pe) #' } #' #' @aliases predictive_error #' @method predictive_error brmsfit #' @importFrom rstantools predictive_error #' @export #' @export predictive_error predictive_error.brmsfit <- function( object, newdata = NULL, re_formula = NULL, re.form = NULL, resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, ... ) { cl <- match.call() if ("re.form" %in% names(cl)) { re_formula <- re.form } .predictive_error( object, newdata = newdata, re_formula = re_formula, method = "posterior_predict", type = "ordinary", resp = resp, ndraws = ndraws, draw_ids = draw_ids, sort = sort, ... ) } #' Posterior Draws of Residuals/Predictive Errors #' #' This method is an alias of \code{\link{predictive_error.brmsfit}} #' with additional arguments for obtaining summaries of the computed draws. #' #' @inheritParams predictive_error.brmsfit #' @param method Method use to obtain predictions. Either #' \code{"posterior_epred"} (the default) or \code{"posterior_predict"}. 
#' Using \code{"posterior_predict"} is recommended #' but \code{"posterior_epred"} is the current default for #' reasons of backwards compatibility. #' @param type The type of the residuals, #' either \code{"ordinary"} or \code{"pearson"}. #' More information is provided under 'Details'. #' @param summary Should summary statistics be returned #' instead of the raw values? Default is \code{TRUE}.. #' @param robust If \code{FALSE} (the default) the mean is used as #' the measure of central tendency and the standard deviation as #' the measure of variability. If \code{TRUE}, the median and the #' median absolute deviation (MAD) are applied instead. #' Only used if \code{summary} is \code{TRUE}. #' @param probs The percentiles to be computed by the \code{quantile} #' function. Only used if \code{summary} is \code{TRUE}. #' #' @return An \code{array} of predictive error/residual draws. If #' \code{summary = FALSE} the output resembles those of #' \code{\link{predictive_error.brmsfit}}. If \code{summary = TRUE} the output #' is an N x E matrix, where N is the number of observations and E denotes #' the summary statistics computed from the draws. #' #' @details Residuals of type \code{'ordinary'} are of the form \eqn{R = Y - #' Yrep}, where \eqn{Y} is the observed and \eqn{Yrep} is the predicted response. #' Residuals of type \code{pearson} are of the form \eqn{R = (Y - Yrep) / #' SD(Yrep)}, where \eqn{SD(Yrep)} is an estimate of the standard deviation of #' \eqn{Yrep}. #' #' @examples #' \dontrun{ #' ## fit a model #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, cores = 2) #' #' ## extract residuals/predictive errors #' res <- residuals(fit) #' head(res) #' } #' #' @export residuals.brmsfit <- function(object, newdata = NULL, re_formula = NULL, method = "posterior_epred", type = c("ordinary", "pearson"), resp = NULL, ndraws = NULL, draw_ids = NULL, sort = FALSE, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { summary <- as_one_logical(summary) out <- .predictive_error( object, newdata = newdata, re_formula = re_formula, method = method, type = type, resp = resp, ndraws = ndraws, draw_ids = draw_ids, sort = sort, ... ) if (summary) { out <- posterior_summary(out, probs = probs, robust = robust) } out } # internal function doing the work for predictive_error.brmsfit .predictive_error <- function(object, newdata, re_formula, method, type, resp, ndraws, draw_ids, sort, nsamples = NULL, subset = NULL, ...) { contains_draws(object) object <- restructure(object) method <- validate_pp_method(method) type <- match.arg(type, c("ordinary", "pearson")) resp <- validate_resp(resp, object) family <- family(object, resp = resp) if (is_polytomous(family)) { stop2("Predictive errors are not defined for ordinal or categorical models.") } ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) draw_ids <- validate_draw_ids(object, draw_ids, ndraws) pred_args <- nlist( object, newdata, re_formula, resp, draw_ids, summary = FALSE, sort = sort, ... ) yrep <- do_call(method, pred_args) y <- get_y(object, resp, newdata = newdata, sort = sort, warn = TRUE, ...) 
if (length(dim(yrep)) == 3L) { # multivariate model y <- lapply(seq_cols(y), function(i) y[, i]) y <- lapply(y, data2draws, dim = dim(yrep)[1:2]) y <- abind(y, along = 3) dimnames(y)[[3]] <- dimnames(yrep)[[3]] } else { y <- data2draws(y, dim = dim(yrep)) } out <- y - yrep remove(y, yrep) if (type == "pearson") { # deprecated as of brms 2.10.6 warning2("Type 'pearson' is deprecated and will be removed in the future.") # get predicted standard deviation for each observation pred_args$summary <- TRUE pred <- do_call("predict", pred_args) if (length(dim(pred)) == 3L) { sd_pred <- array2list(pred[, 2, ]) sd_pred <- lapply(sd_pred, data2draws, dim = dim(out)[1:2]) sd_pred <- abind(sd_pred, along = 3) } else { sd_pred <- data2draws(pred[, 2], dim = dim(out)) } out <- out / sd_pred } out } brms/R/exclude_pars.R0000644000175000017500000001634714111751666014374 0ustar nileshnilesh# list parameters NOT to be saved by Stan # @return a vector of parameter names to be excluded exclude_pars <- function(x, ...) { UseMethod("exclude_pars") } #' @export exclude_pars.default <- function(x, ...) { character(0) } #' @export exclude_pars.brmsfit <- function(x, ...) { out <- character(0) save_pars <- x$save_pars bterms <- brmsterms(x$formula) c(out) <- exclude_pars(bterms, data = x$data, save_pars = save_pars, ...) meef <- tidy_meef(bterms, x$data) if (has_rows(meef)) { I <- seq_along(unique(meef$grname)) K <- seq_rows(meef) c(out) <- paste0(c("Corme_"), I) if (!save_pars$all) { c(out) <- c(paste0("zme_", K), paste0("Lme_", I)) } if (isFALSE(save_pars$latent)) { c(out) <- paste0("Xme_", K) } else if (is.character(save_pars$latent)) { sub_K <- K[!meef$xname %in% save_pars$latent] if (length(sub_K)) { c(out) <- paste0("Xme_", sub_K) } } } ranef <- x$ranef if (has_rows(ranef)) { rm_re_pars <- c(if (!save_pars$all) c("z", "L"), "Cor", "r") for (id in unique(ranef$id)) { c(out) <- paste0(rm_re_pars, "_", id) } if (isFALSE(save_pars$group)) { p <- usc(combine_prefix(ranef)) c(out) <- paste0("r_", ranef$id, p, "_", ranef$cn) } else if (is.character(save_pars$group)) { sub_ranef <- ranef[!ranef$group %in% save_pars$group, ] if (has_rows(sub_ranef)) { sub_p <- usc(combine_prefix(sub_ranef)) c(out) <- paste0("r_", sub_ranef$id, sub_p, "_", sub_ranef$cn) } } tranef <- get_dist_groups(ranef, "student") if (!save_pars$all && has_rows(tranef)) { c(out) <- paste0(c("udf_", "dfm_"), tranef$ggn) } } out <- unique(out) out <- setdiff(out, save_pars$manual) out } #' @export exclude_pars.mvbrmsterms <- function(x, save_pars, ...) { out <- c("Rescor", "Sigma") if (!save_pars$all) { c(out) <- c("Lrescor", "LSigma") } for (i in seq_along(x$terms)) { c(out) <- exclude_pars(x$terms[[i]], save_pars = save_pars, ...) } out } #' @export exclude_pars.brmsterms <- function(x, save_pars, ...) { out <- character(0) resp <- usc(combine_prefix(x)) if (!save_pars$all) { par_classes <- c("ordered_Intercept", "fixed_Intercept", "theta") c(out) <- paste0(par_classes, resp) } for (dp in names(x$dpars)) { c(out) <- exclude_pars(x$dpars[[dp]], save_pars = save_pars, ...) } for (nlp in names(x$nlpars)) { c(out) <- exclude_pars(x$nlpars[[nlp]], save_pars = save_pars, ...) } if (is.formula(x$adforms$mi)) { if (!(isTRUE(save_pars$latent) || x$resp %in% save_pars$latent)) { c(out) <- paste0("Yl", resp) } } if (!(isTRUE(save_pars$group) || ".err" %in% save_pars$group)) { # latent residuals are like group-level effects c(out) <- paste0("err", resp) } out } #' @export exclude_pars.btl <- function(x, data, save_pars, ...) 
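# excludes helper parameters defined at the linear-predictor level:
# the autocorrelation Cholesky factor 'chol_cor' is always dropped, while
# QR coefficients, shrinkage-prior scales, helper intercepts, and raw
# spline/CAR coefficients are only kept if save_pars(all = TRUE) was set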
{ out <- character(0) p <- usc(combine_prefix(x)) c(out) <- paste0("chol_cor", p) if (!save_pars$all) { par_classes <- c( "bQ", "hs_global", "hs_local", "hs_slab", "zb", "hs_localsp", "R2D2_tau2", "zbsp", "Intercept", "first_Intercept", "merged_Intercept", "zcar", "nszcar", "zerr" ) c(out) <- paste0(par_classes, p) smef <- tidy_smef(x, data) for (i in seq_rows(smef)) { nb <- seq_len(smef$nbases[i]) c(out) <- paste0("zs", p, "_", i, "_", nb) } } out } #' Control Saving of Parameter Draws #' #' Control which (draws of) parameters should be saved in a \pkg{brms} #' model. The output of this function is ment for usage in the #' \code{save_pars} argument of \code{\link{brm}}. #' #' @param group A flag to indicate if group-level coefficients for #' each level of the grouping factors should be saved (default is #' \code{TRUE}). Set to \code{FALSE} to save memory. Alternatively, #' \code{group} may also be a character vector naming the grouping factors #' for which to save draws of coefficients. #' @param latent A flag to indicate if draws of latent variables obtained by #' using \code{me} and \code{mi} terms should be saved (default is #' \code{FALSE}). Saving these draws allows to better use methods such as #' \code{posterior_predict} with the latent variables but leads to very large #' \R objects even for models of moderate size and complexity. Alternatively, #' \code{latent} may also be a character vector naming the latent variables #' for which to save draws. #' @param all A flag to indicate if draws of all variables defined in Stan's #' \code{parameters} block should be saved (default is \code{FALSE}). Saving #' these draws is required in order to apply the certain methods such as #' \code{bridge_sampler} and \code{bayes_factor}. #' @param manual A character vector naming Stan variable names which should be #' saved. These names should match the variable names inside the Stan code #' before renaming. This feature is meant for power users only and will rarely #' be useful outside of very special cases. #' #' @return A list of class \code{"save_pars"}. #' #' @examples #' \dontrun{ #' # don't store group-level coefficients #' fit <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson(), #' save_pars = save_pars(group = FALSE)) #' variables(fit) #' } #' #' @export save_pars <- function(group = TRUE, latent = FALSE, all = FALSE, manual = NULL) { out <- list() if (is.logical(group)) { out$group <- as_one_logical(group) } else { out$group <- as.character(group) } if (is.logical(latent)) { out$latent <- as_one_logical(latent) } else { out$latent <- as.character(latent) } out$all <- as_one_logical(all) out$manual <- as.character(manual) class(out) <- "save_pars" out } # validate 'save_pars' argument # deprecated arguments: # @param save_ranef save varying effects per level? # @param save_mevars save noise-free variables? # @param save_all_pars save all variables from the 'parameters' block? # @return validated 'save_pars' argument validate_save_pars <- function(save_pars, save_ranef = NULL, save_mevars = NULL, save_all_pars = NULL) { if (is.null(save_pars)) { save_pars <- save_pars() } if (!is.save_pars(save_pars)) { stop2("Argument 'save_pars' needed to be created via 'save_pars()'.") } if (!is.null(save_ranef)) { warning2( "Argument 'save_ranef' is deprecated. Please use argument ", "'group' in function 'save_pars()' instead." ) save_pars$group <- as_one_logical(save_ranef) } if (!is.null(save_mevars)) { warning2( "Argument 'save_mevars' is deprecated. 
Please use argument ", "'latent' in function 'save_pars()' instead." ) save_pars$latent <- as_one_logical(save_mevars) } if (!is.null(save_all_pars)) { warning2( "Argument 'save_all_pars' is deprecated. Please use argument ", "'all' in function 'save_pars()' instead." ) save_pars$all <- as_one_logical(save_all_pars) } save_pars } is.save_pars <- function(x) { inherits(x, "save_pars") } brms/R/stan-likelihood.R0000644000175000017500000010617514146732674015011 0ustar nileshnilesh# unless otherwise specified, functions return a single character # string defining the likelihood of the model in Stan language stan_log_lik <- function(x, ...) { UseMethod("stan_log_lik") } # Stan code for the model likelihood # @param bterms object of class brmsterms # @param data data passed by the user # @param mix optional mixture component ID # @param ptheta are mixing proportions predicted? #' @export stan_log_lik.family <- function(x, bterms, data, threads, normalize, mix = "", ptheta = FALSE, ...) { stopifnot(is.brmsterms(bterms)) stopifnot(length(mix) == 1L) bterms$family <- x resp <- usc(combine_prefix(bterms)) # prepare family part of the likelihood log_lik_args <- nlist(bterms, resp, mix, threads) log_lik_fun <- paste0("stan_log_lik_", prepare_family(bterms)$fun) ll <- do_call(log_lik_fun, log_lik_args) # incorporate other parts into the likelihood args <- nlist(ll, bterms, data, resp, threads, normalize, mix, ptheta) if (nzchar(mix)) { out <- do_call(stan_log_lik_mix, args) } else if (is.formula(bterms$adforms$cens)) { out <- do_call(stan_log_lik_cens, args) } else if (is.formula(bterms$adforms$weights)) { out <- do_call(stan_log_lik_weights, args) } else { out <- do_call(stan_log_lik_general, args) } if (grepl(stan_nn_regex(), out) && !nzchar(mix)) { # loop over likelihood if it cannot be vectorized out <- paste0( " for (n in 1:N", resp, ") {\n", stan_nn_def(threads), " ", out, " }\n" ) } out } #' @export stan_log_lik.mixfamily <- function(x, bterms, threads, ...) { dp_ids <- dpar_id(names(bterms$dpars)) fdp_ids <- dpar_id(names(bterms$fdpars)) resp <- usc(bterms$resp) ptheta <- any(dpar_class(names(bterms$dpars)) %in% "theta") ll <- rep(NA, length(x$mix)) for (i in seq_along(x$mix)) { sbterms <- bterms sbterms$dpars <- sbterms$dpars[dp_ids == i] sbterms$fdpars <- sbterms$fdpars[fdp_ids == i] ll[i] <- stan_log_lik( x$mix[[i]], sbterms, mix = i, ptheta = ptheta, threads = threads, ... ) } resp <- usc(combine_prefix(bterms)) n <- stan_nn(threads) has_weights <- is.formula(bterms$adforms$weights) weights <- str_if(has_weights, glue("weights{resp}{n} * ")) out <- glue( " // likelihood of the mixture model\n", " for (n in 1:N{resp}) {{\n", stan_nn_def(threads), " real ps[{length(ll)}];\n" ) str_add(out) <- collapse(" ", ll) str_add(out) <- glue( " {tp()}{weights}log_sum_exp(ps);\n", " }}\n" ) out } #' @export stan_log_lik.brmsterms <- function(x, ...) { stan_log_lik(x$family, bterms = x, ...) } #' @export stan_log_lik.mvbrmsterms <- function(x, ...) { if (x$rescor) { out <- stan_log_lik(as.brmsterms(x), ...) } else { out <- ulapply(x$terms, stan_log_lik, ...) } out } # default likelihood in Stan language stan_log_lik_general <- function(ll, bterms, data, threads, normalize, resp = "", ...) 
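# as a hypothetical illustration, a vectorized gaussian likelihood without
# weights, censoring, or truncation is rendered roughly as
#   target += normal_lpdf(Y | mu, sigma);
# with '[n]' indexing and '_lupdf' suffixes added where required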
{ stopifnot(is.sdist(ll)) require_n <- grepl(stan_nn_regex(), ll$args) n <- str_if(require_n, stan_nn(threads), stan_slice(threads)) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) glue("{tp()}{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n") } # censored likelihood in Stan language stan_log_lik_cens <- function(ll, bterms, data, threads, normalize, resp = "", ...) { stopifnot(is.sdist(ll)) s <- wsp(nsp = 4) cens <- eval_rhs(bterms$adforms$cens) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) has_weights <- is.formula(bterms$adforms$weights) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) w <- str_if(has_weights, glue("weights{resp}{n} * ")) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) tp <- tp() out <- glue( "// special treatment of censored data\n", s, "if (cens{resp}{n} == 0) {{\n", s, "{tp}{w}{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == 1) {{\n", s, "{tp}{w}{ll$dist}_lccdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == -1) {{\n", s, "{tp}{w}{ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) if (cens$vars$y2 != "NA") { # interval censoring is required str_add(out) <- glue( s, "}} else if (cens{resp}{n} == 2) {{\n", s, "{tp}{w}log_diff_exp(\n", s, " {ll$dist}_lcdf(rcens{resp}{n}{ll$shift} | {ll$args}),\n", s, " {ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args})\n", s, " ){tr};\n" ) } str_add(out) <- glue(s, "}}\n") out } # weighted likelihood in Stan language stan_log_lik_weights <- function(ll, bterms, data, threads, normalize, resp = "", ...) { stopifnot(is.sdist(ll)) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) glue( "{tp()}weights{resp}{n} * ({ll$dist}_{lpdf}", "({Y}{resp}{n}{ll$shift} | {ll$args}){tr});\n" ) } # likelihood of a single mixture component stan_log_lik_mix <- function(ll, bterms, data, mix, ptheta, threads, normalize, resp = "", ...) 
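# each mixture component k contributes a term of the form
#   ps[k] = log(theta_k) + <component log-density>;
# stan_log_lik.mixfamily() then combines these terms via log_sum_exp(ps)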
{ stopifnot(is.sdist(ll)) theta <- str_if(ptheta, glue("theta{mix}{resp}[n]"), glue("log(theta{mix}{resp})") ) tr <- stan_log_lik_trunc(ll, bterms, data, resp = resp, threads = threads) lpdf <- stan_log_lik_lpdf_name(bterms, normalize, dist = ll$dist) Y <- stan_log_lik_Y_name(bterms) n <- stan_nn(threads) if (is.formula(bterms$adforms$cens)) { # mostly copied over from stan_log_lik_cens cens <- eval_rhs(bterms$adforms$cens) s <- wsp(nsp = 4) out <- glue( "// special treatment of censored data\n", s, "if (cens{resp}{n} == 0) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == 1) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_lccdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n", s, "}} else if (cens{resp}{n} == -1) {{\n", s, " ps[{mix}] = {theta} + ", "{ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) if (cens$vars$y2 != "NA") { # interval censoring is required str_add(out) <- glue( s, "}} else if (cens{resp}{n} == 2) {{\n", s, " ps[{mix}] = {theta} + log_diff_exp(\n", s, " {ll$dist}_lcdf(rcens{resp}{n}{ll$shift} | {ll$args}),\n", s, " {ll$dist}_lcdf({Y}{resp}{n}{ll$shift} | {ll$args})\n", s, " ){tr};\n" ) } str_add(out) <- glue(s, "}}\n") } else { out <- glue( "ps[{mix}] = {theta} + ", "{ll$dist}_{lpdf}({Y}{resp}{n}{ll$shift} | {ll$args}){tr};\n" ) } out } # truncated part of the likelihood # @param short use the T[, ] syntax? stan_log_lik_trunc <- function(ll, bterms, data, threads, resp = "", short = FALSE) { stopifnot(is.sdist(ll)) bounds <- trunc_bounds(bterms, data = data) if (!any(bounds$lb > -Inf | bounds$ub < Inf)) { return("") } n <- stan_nn(threads) m1 <- str_if(use_int(bterms), " - 1") lb <- str_if(any(bounds$lb > -Inf), glue("lb{resp}{n}{m1}")) ub <- str_if(any(bounds$ub < Inf), glue("ub{resp}{n}")) if (short) { # truncation using T[, ] syntax out <- glue(" T[{lb}, {ub}]") } else { # truncation making use of _lcdf functions ms <- paste0(" -\n", wsp(nsp = 6)) if (any(bounds$lb > -Inf) && !any(bounds$ub < Inf)) { out <- glue("{ms}{ll$dist}_lccdf({lb}{ll$shift} | {ll$args})") } else if (!any(bounds$lb > -Inf) && any(bounds$ub < Inf)) { out <- glue("{ms}{ll$dist}_lcdf({ub}{ll$shift} | {ll$args})") } else if (any(bounds$lb > -Inf) && any(bounds$ub < Inf)) { trr <- glue("{ll$dist}_lcdf({ub}{ll$shift} | {ll$args})") trl <- glue("{ll$dist}_lcdf({lb}{ll$shift} | {ll$args})") out <- glue("{ms}log_diff_exp({trr}, {trl})") } } out } stan_log_lik_lpdf_name <- function(bterms, normalize, dist = NULL) { if (!is.null(dist) && !normalize) { # some Stan lpdfs or lpmfs only exist as normalized versions always_normalized <- always_normalized(bterms) if (length(always_normalized)) { always_normalized <- paste0(escape_all(always_normalized), "$") normalize <- any(ulapply(always_normalized, grepl, x = dist)) } } if (normalize) { out <- ifelse(use_int(bterms$family), "lpmf", "lpdf") } else { out <- ifelse(use_int(bterms$family), "lupmf", "lupdf") } out } stan_log_lik_Y_name <- function(bterms) { ifelse(is.formula(bterms$adforms$mi), "Yl", "Y") } # prepare names of distributional parameters # @param reqn will the likelihood be wrapped in a loop over n? 
# @param dpars optional names of distributional parameters to be prepared # if not specified will prepare all distributional parameters stan_log_lik_dpars <- function(bterms, reqn, resp = "", mix = "", dpars = NULL) { if (is.null(dpars)) { dpars <- paste0(valid_dpars(bterms), mix) } is_pred <- dpars %in% c("mu", names(bterms$dpars)) out <- paste0(dpars, resp, ifelse(reqn & is_pred, "[n]", "")) named_list(dpars, out) } # adjust lpdf name if a more efficient version is available # for a specific link. For instance 'poisson_log' stan_log_lik_simple_lpdf <- function(lpdf, link, bterms, sep = "_") { stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) if (bterms$family$link == link && !cens_or_trunc) { lpdf <- paste0(lpdf, sep, link) } lpdf } # prepare _logit suffix for distributional parameters # used in zero-inflated and hurdle models stan_log_lik_dpar_usc_logit <- function(dpar, bterms) { stopifnot(dpar %in% c("zi", "hu")) stopifnot(is.brmsterms(bterms)) cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) usc_logit <- isTRUE(bterms$dpars[[dpar]]$family$link == "logit") str_if(usc_logit && !cens_or_trunc, "_logit") } # add 'se' to 'sigma' within the Stan likelihood stan_log_lik_add_se <- function(sigma, bterms, reqn, resp = "", threads = NULL) { if (!is.formula(bterms$adforms$se)) { return(sigma) } nse <- str_if(reqn, stan_nn(threads), stan_slice(threads)) if (no_sigma(bterms)) { sigma <- glue("se{resp}{nse}") } else { sigma <- glue("sqrt(square({sigma}) + se2{resp}{nse})") } sigma } # multiply 'dpar' by the 'rate' denominator within the Stan likelihood # @param log add the rate denominator on the log scale if sensible? stan_log_lik_multiply_rate_denom <- function(dpar, bterms, reqn, resp = "", log = FALSE, transform = NULL) { dpar_transform <- dpar if (!is.null(transform)) { dpar_transform <- glue("{transform}({dpar})") } if (!is.formula(bterms$adforms$rate)) { return(dpar_transform) } ndenom <- str_if(reqn, "[n]") denom <- glue("denom{resp}{ndenom}") cens_or_trunc <- stan_log_lik_adj(bterms, c("cens", "trunc")) if (log && bterms$family$link == "log" && !cens_or_trunc) { denom <- glue("log_{denom}") operator <- "+" } else { is_pred <- dpar %in% c("mu", names(bterms$dpars)) operator <- str_if(reqn || !is_pred, "*", ".*") } glue("{dpar_transform} {operator} {denom}") } # check if the log-liklihood needs to be adjused # @param x named list of formulas or brmsterms object # @param adds vector of addition argument names # @return a single logical value stan_log_lik_adj <- function(x, adds = c("weights", "cens", "trunc")) { stopifnot(all(adds %in% c("weights", "cens", "trunc"))) if (is.brmsterms(x)) x <- x$adforms any(ulapply(x[adds], is.formula)) } # one function per family stan_log_lik_gaussian <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$sigma <- paste0("sigma", resp) out <- sdist("normal_id_glm", p$x, p$alpha, p$beta, p$sigma) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) out <- sdist("normal", p$mu, p$sigma) } out } stan_log_lik_gaussian_mv <- function(bterms, resp = "", mix = "", ...) 
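# multivariate gaussian with estimated residual correlations (rescor);
# uses multi_normal_cholesky() with the joint mean vector Mu and the
# Cholesky factor LSigma of the residual covariance matrix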
{ reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || bterms$sigma_pred p <- list(Mu = paste0("Mu", if (reqn) "[n]")) p$LSigma <- paste0("LSigma", if (bterms$sigma_pred) "[n]") sdist("multi_normal_cholesky", p$Mu, p$LSigma) } stan_log_lik_gaussian_time <- function(bterms, resp = "", mix = "", ...) { if (stan_log_lik_adj(bterms)) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) v <- c("chol_cor", "se2", "nobs_tg", "begin_tg", "end_tg") p[v] <- as.list(paste0(v, resp)) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("normal_time_{sfx}"), p$mu, p$sigma, p$chol_cor, p$se2, p$nobs_tg, p$begin_tg, p$end_tg ) } stan_log_lik_gaussian_fcor <- function(bterms, resp = "", mix = "", ...) { has_se <- is.formula(bterms$adforms$se) if (stan_log_lik_adj(bterms) || has_se) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$Lfcor <- paste0("Lfcor", resp) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("normal_fcor_{sfx}"), p$mu, p$sigma, p$Lfcor) } stan_log_lik_gaussian_lagsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("lagsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("normal_lagsar", p$mu, p$sigma, p$lagsar, p$Msar, p$eigenMsar) } stan_log_lik_gaussian_errorsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("errorsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("normal_errorsar", p$mu, p$sigma, p$errorsar, p$Msar, p$eigenMsar) } stan_log_lik_student <- function(bterms, resp = "", mix = "", threads = NULL, ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) sdist("student_t", p$nu, p$mu, p$sigma) } stan_log_lik_student_mv <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || bterms$sigma_pred p <- stan_log_lik_dpars(bterms, reqn, resp, mix, dpars = "nu") p$Mu <- paste0("Mu", if (reqn) "[n]") p$Sigma <- paste0("Sigma", if (bterms$sigma_pred) "[n]") sdist("multi_student_t", p$nu, p$Mu, p$Sigma) } stan_log_lik_student_time <- function(bterms, resp = "", mix = "", ...) { if (stan_log_lik_adj(bterms)) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) v <- c("chol_cor", "se2", "nobs_tg", "begin_tg", "end_tg") p[v] <- as.list(paste0(v, resp)) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("student_t_time_{sfx}"), p$nu, p$mu, p$sigma, p$chol_cor, p$se2, p$nobs_tg, p$begin_tg, p$end_tg ) } stan_log_lik_student_fcor <- function(bterms, resp = "", mix = "", ...) { has_se <- is.formula(bterms$adforms$se) if (stan_log_lik_adj(bterms) || has_se) { stop2("Invalid addition arguments for this model.") } p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$Lfcor <- paste0("Lfcor", resp) sfx <- str_if("sigma" %in% names(bterms$dpars), "het", "hom") sdist(glue("student_t_fcor_{sfx}"), p$nu, p$mu, p$sigma, p$Lfcor) } stan_log_lik_student_lagsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
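# student-t counterpart of the gaussian SAR lag model above; same arguments
# plus the degrees-of-freedom parameter nu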
{ p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("lagsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("student_t_lagsar", p$nu, p$mu, p$sigma, p$lagsar, p$Msar, p$eigenMsar) } stan_log_lik_student_errorsar <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, FALSE, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, FALSE, resp, threads) v <- c("errorsar", "Msar", "eigenMsar") p[v] <- as.list(paste0(v, resp)) sdist("student_t_errorsar", p$nu, p$mu, p$sigma, p$errorsar, p$Msar, p$eigenMsar) } stan_log_lik_lognormal <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("lognormal", p$mu, p$sigma) } stan_log_lik_shifted_lognormal <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("lognormal", p$mu, p$sigma, shift = paste0(" - ", p$ndt)) } stan_log_lik_asym_laplace <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("asym_laplace", p$mu, p$sigma, p$quantile) } stan_log_lik_skew_normal <- function(bterms, resp = "", mix = "", threads = NULL, ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$sigma <- stan_log_lik_add_se(p$sigma, bterms, reqn, resp, threads) # required because of CP parameterization of mu and sigma nomega <- any(grepl(stan_nn_regex(), c(p$sigma, p$alpha))) nomega <- str_if(reqn && nomega, "[n]") p$omega <- paste0("omega", mix, resp, nomega) sdist("skew_normal", p$mu, p$omega, p$alpha) } stan_log_lik_poisson <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) out <- sdist("poisson_log_glm", p$x, p$alpha, p$beta) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- stan_log_lik_simple_lpdf("poisson", "log", bterms) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) out <- sdist(lpdf, p$mu) } out } stan_log_lik_negbinomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$shape <- paste0("shape", resp) out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom(p$shape, bterms, reqn, resp) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } out } stan_log_lik_negbinomial2 <- function(bterms, resp = "", mix = "", threads = NULL, ...) 
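# alternative parameterization of the negative binomial in which
# overdispersion is expressed as 'sigma' and converted to the usual
# shape parameter via shape = inv(sigma)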
{ if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$sigma <- paste0("sigma", resp) p$shape <- paste0("inv(", p$sigma, ")") out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom( p$sigma, bterms, reqn, resp, transform = "inv" ) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } out } stan_log_lik_geometric <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) p$shape <- "1" out <- sdist("neg_binomial_2_log_glm", p$x, p$alpha, p$beta, p$shape) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) p$shape <- "1" p$mu <- stan_log_lik_multiply_rate_denom(p$mu, bterms, reqn, resp, log = TRUE) p$shape <- stan_log_lik_multiply_rate_denom(p$shape, bterms, reqn, resp) lpdf <- stan_log_lik_simple_lpdf("neg_binomial_2", "log", bterms) out <- sdist(lpdf, p$mu, p$shape) } } stan_log_lik_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) slice <- str_if(reqn, stan_nn(threads), stan_slice(threads)) p$trials <- paste0("trials", resp, slice) lpdf <- stan_log_lik_simple_lpdf("binomial", "logit", bterms) sdist(lpdf, p$trials, p$mu) } stan_log_lik_bernoulli <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) out <- sdist("bernoulli_logit_glm", p$x, p$alpha, p$beta) } else { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- stan_log_lik_simple_lpdf("bernoulli", "logit", bterms) out <- sdist(lpdf, p$mu) } out } stan_log_lik_discrete_weibull <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("discrete_weibull", p$mu, p$shape) } stan_log_lik_com_poisson <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("com_poisson", "log", bterms) sdist(lpdf, p$mu, p$shape) } stan_log_lik_gamma <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("gamma", p$shape, p$mu) } stan_log_lik_exponential <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("exponential", p$mu) } stan_log_lik_weibull <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("weibull", p$shape, p$mu) } stan_log_lik_frechet <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("frechet", p$nu, p$mu) } stan_log_lik_gen_extreme_value <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("gen_extreme_value", p$mu, p$sigma, p$xi) } stan_log_lik_exgaussian <- function(bterms, resp = "", mix = "", ...) 
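# the ex-gaussian distribution is mapped onto Stan's exp_mod_normal density
# with location mu - beta, scale sigma, and rate inv(beta), so that mu
# keeps its interpretation as the mean of the distribution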
{ reqn <- stan_log_lik_adj(bterms) || nzchar(mix) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist( "exp_mod_normal", paste0(p$mu, " - ", p$beta), p$sigma, paste0("inv(", p$beta, ")") ) } stan_log_lik_inverse.gaussian <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || glue("shape{mix}") %in% names(bterms$dpars) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- paste0("inv_gaussian", if (!reqn) "_vector") n <- str_if(reqn, "[n]") sdist(lpdf, p$mu, p$shape) } stan_log_lik_wiener <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) n <- stan_nn(threads) p$dec <- paste0("dec", resp, n) sdist("wiener_diffusion", p$dec, p$bs, p$ndt, p$bias, p$mu) } stan_log_lik_beta <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || paste0("phi", mix) %in% names(bterms$dpars) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) sdist("beta", paste0(p$mu, " * ", p$phi), paste0("(1 - ", p$mu, ") * ", p$phi) ) } stan_log_lik_von_mises <- function(bterms, resp = "", mix = "", ...) { reqn <- stan_log_lik_adj(bterms) || nzchar(mix) || "kappa" %in% names(bterms$dpars) p <- stan_log_lik_dpars(bterms, reqn, resp, mix) lpdf <- paste0("von_mises_", str_if(reqn, "real", "vector")) sdist(lpdf, p$mu, p$kappa) } stan_log_lik_cox <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) n <- stan_nn(threads) p$bhaz <- paste0("bhaz", resp, n) p$cbhaz <- paste0("cbhaz", resp, n) lpdf <- "cox" if (bterms$family$link == "log") { str_add(lpdf) <- "_log" } sdist(lpdf, p$mu, p$bhaz, p$cbhaz) } stan_log_lik_cumulative <- function(bterms, resp = "", mix = "", threads = NULL, ...) { if (use_glm_primitive(bterms, allow_special_terms = FALSE)) { p <- args_glm_primitive(bterms$dpars$mu, resp = resp, threads = threads) out <- sdist("ordered_logistic_glm", p$x, p$beta, p$alpha) return(out) } stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_sratio <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_cratio <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_acat <- function(bterms, resp = "", mix = "", threads = NULL, ...) { stan_log_lik_ordinal(bterms, resp, mix, threads, ...) } stan_log_lik_categorical <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed p <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu") sdist("categorical_logit", p$mu) } stan_log_lik_multinomial <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed p <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu") sdist("multinomial_logit2", p$mu) } stan_log_lik_dirichlet <- function(bterms, resp = "", mix = "", ...) { stopifnot(bterms$family$link == "logit") stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed mu <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu")$mu reqn <- glue("phi{mix}") %in% names(bterms$dpars) phi <- stan_log_lik_dpars(bterms, reqn, resp, mix, dpars = "phi")$phi sdist("dirichlet_logit", mu, phi) } stan_log_lik_dirichlet2 <- function(bterms, resp = "", mix = "", ...) 
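# 'dirichlet2' predicts the concentration parameters directly, so the plain
# dirichlet density is used instead of the mean/precision (mu, phi)
# parameterization employed by 'dirichlet'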
{ stopifnot(!isTRUE(nzchar(mix))) # mixture models are not allowed mu <- stan_log_lik_dpars(bterms, TRUE, resp, mix, dpars = "mu")$mu sdist("dirichlet", mu) } stan_log_lik_ordinal <- function(bterms, resp = "", mix = "", threads = NULL, ...) { prefix <- paste0(str_if(nzchar(mix), paste0("_mu", mix)), resp) p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) if (use_ordered_logistic(bterms)) { # TODO: support 'ordered_probit' as well lpdf <- "ordered_logistic" p[grepl("^disc", names(p))] <- NULL } else { lpdf <- paste0(bterms$family$family, "_", bterms$family$link) } if (has_thres_groups(bterms)) { str_add(lpdf) <- "_merged" n <- stan_nn(threads) p$Jthres <- paste0("Jthres", resp, n) p$thres <- "merged_Intercept" } else { p$thres <- "Intercept" } str_add(p$thres) <- prefix if (has_sum_to_zero_thres(bterms)) { str_add(p$thres) <- "_stz" } if (has_cs(bterms)) { if (has_thres_groups(bterms)) { stop2("Cannot use category specific effects ", "in models with multiple thresholds.") } str_add(p$thres) <- paste0(" - transpose(mucs", prefix, "[n])") } sdist(lpdf, p$mu, p$disc, p$thres, p$Jthres) } stan_log_lik_hurdle_poisson <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("hurdle_poisson", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("hu", bterms)) sdist(lpdf, p$mu, p$hu) } stan_log_lik_hurdle_negbinomial <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("hurdle_neg_binomial", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("hu", bterms)) sdist(lpdf, p$mu, p$shape, p$hu) } stan_log_lik_hurdle_gamma <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("hu", bterms) lpdf <- paste0("hurdle_gamma", usc_logit) sdist(lpdf, p$shape, p$mu, p$hu) } stan_log_lik_hurdle_lognormal <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("hu", bterms) lpdf <- paste0("hurdle_lognormal", usc_logit) sdist(lpdf, p$mu, p$sigma, p$hu) } stan_log_lik_zero_inflated_poisson <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("zero_inflated_poisson", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$mu, p$zi) } stan_log_lik_zero_inflated_negbinomial <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) lpdf <- stan_log_lik_simple_lpdf("zero_inflated_neg_binomial", "log", bterms) lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$mu, p$shape, p$zi) } stan_log_lik_zero_inflated_binomial <- function(bterms, resp = "", mix = "", threads = NULL, ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) n <- stan_nn(threads) p$trials <- paste0("trials", resp, n) lpdf <- "zero_inflated_binomial" lpdf <- stan_log_lik_simple_lpdf(lpdf, "logit", bterms, sep = "_b") lpdf <- paste0(lpdf, stan_log_lik_dpar_usc_logit("zi", bterms)) sdist(lpdf, p$trials, p$mu, p$zi) } stan_log_lik_zero_inflated_beta <- function(bterms, resp = "", mix = "", ...) 
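# appends '_logit' to the lpdf name when 'zi' uses a logit link (and no
# censoring or truncation is present) so that Stan can evaluate 'zi'
# directly on the linear-predictor scale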
{ p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("zi", bterms) lpdf <- paste0("zero_inflated_beta", usc_logit) sdist(lpdf, p$mu, p$phi, p$zi) } stan_log_lik_zero_one_inflated_beta <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) sdist("zero_one_inflated_beta", p$mu, p$phi, p$zoi, p$coi) } stan_log_lik_zero_inflated_asym_laplace <- function(bterms, resp = "", mix = "", ...) { p <- stan_log_lik_dpars(bterms, TRUE, resp, mix) usc_logit <- stan_log_lik_dpar_usc_logit("zi", bterms) lpdf <- paste0("zero_inflated_asym_laplace", usc_logit) sdist(lpdf, p$mu, p$sigma, p$quantile, p$zi) } stan_log_lik_custom <- function(bterms, resp = "", mix = "", threads = NULL, ...) { family <- bterms$family no_loop <- isFALSE(family$loop) if (no_loop && (stan_log_lik_adj(bterms) || nzchar(mix))) { stop2("This model requires evaluating the custom ", "likelihood as a loop over observations.") } reqn <- !no_loop p <- stan_log_lik_dpars(bterms, reqn, resp, mix) dpars <- paste0(family$dpars, mix) if (is_ordinal(family)) { prefix <- paste0(resp, if (nzchar(mix)) paste0("_mu", mix)) p$thres <- paste0("Intercept", prefix) } # insert the response name into the 'vars' strings # addition terms contain the response in their variable name n <- stan_nn(threads) var_names <- sub("\\[.+$", "", family$vars) var_indices <- get_matches("\\[.+$", family$vars, first = TRUE) has_n_index <- var_indices %in% "[n]" if (no_loop && any(has_n_index)) { stop2("Invalid use of index '[n]' in an unlooped custom likelihood.") } var_indices <- ifelse(has_n_index, n, var_indices) is_var_adterms <- var_names %in% c("se", "trials", "dec") | grepl("^((vint)|(vreal))[[:digit:]]+$", var_names) var_resps <- ifelse(is_var_adterms, resp, "") vars <- paste0(var_names, var_resps, var_indices) sdist(family$name, p[dpars], p$thres, vars) } # use Stan GLM primitive functions? 
# @param bterms a brmsterms object # @return TRUE or FALSE use_glm_primitive <- function(bterms, allow_special_terms = TRUE) { stopifnot(is.brmsterms(bterms)) # the model can only have a single predicted parameter # and no additional residual or autocorrelation structure mu <- bterms$dpars[["mu"]] if (!is.btl(mu) || length(bterms$dpars) > 1L || isTRUE(bterms$rescor) || length(bterms$adforms) || is.formula(mu$ac)) { return(FALSE) } # supported families and link functions # TODO: support categorical_logit primitive glm_links <- list( gaussian = "identity", bernoulli = "logit", poisson = "log", negbinomial = "log", negbinomial2 = "log" # rstan does not yet support 'ordered_logistic_glm' # cumulative = "logit" ) if (!isTRUE(glm_links[[mu$family$family]] == mu$family$link)) { return(FALSE) } if (!allow_special_terms && has_special_terms(mu)) { # some primitives do not support special terms in the way # required by brms' Stan code generation return(FALSE) } length(all_terms(mu$fe)) > 0 && !is_sparse(mu$fe) } # standard arguments for primitive Stan GLM functions # @param bterms a btl object # @param resp optional name of the response variable # @return a named list of Stan code snippets args_glm_primitive <- function(bterms, resp = "", threads = NULL) { stopifnot(is.btl(bterms)) decomp <- get_decomp(bterms$fe) center_X <- stan_center_X(bterms) slice <- stan_slice(threads) sfx_X <- sfx_b <- "" if (decomp == "QR") { sfx_X <- sfx_b <- "Q" } else if (center_X) { sfx_X <- "c" } x <- glue("X{sfx_X}{resp}{slice}") beta <- glue("b{sfx_b}{resp}") if (has_special_terms(bterms)) { # the intercept vector will contain all the remaining terms alpha <- glue("mu{resp}") } else { if (center_X) { alpha <- glue("Intercept{resp}") } else { alpha <- "0" } } nlist(x, alpha, beta) } # use the ordered_logistic built-in functions use_ordered_logistic <- function(bterms) { stopifnot(is.brmsterms(bterms)) isTRUE(bterms$family$family == "cumulative") && isTRUE(bterms$family$link == "logit") && isTRUE(bterms$fdpars$disc$value == 1) && !has_cs(bterms) } # prepare distribution and arguments for use in Stan sdist <- function(dist, ..., shift = "") { args <- sargs(...) structure(nlist(dist, args, shift), class = "sdist") } # prepare arguments for Stan likelihood statements sargs <- function(...) { dots <- as.character(c(...)) dots <- dots[nzchar(dots)] paste0(dots, collapse = ", ") } is.sdist <- function(x) { inherits(x, "sdist") } tp <- function(wsp = 2) { wsp <- collapse(rep(" ", wsp)) paste0(wsp, "target += ") } brms/R/formula-ad.R0000644000175000017500000003256014111751666013740 0ustar nileshnilesh#' Additional Response Information #' #' Provide additional information on the response variable #' in \pkg{brms} models, such as censoring, truncation, or #' known measurement error. #' #' @name addition-terms #' #' @param x A vector; usually a variable defined in the data. Allowed values #' depend on the function: \code{resp_se} and \code{resp_weights} require #' positive numeric values. \code{resp_trials}, \code{resp_thres}, and #' \code{resp_cat} require positive integers. \code{resp_dec} requires #' \code{0} and \code{1}, or alternatively \code{'lower'} and \code{'upper'}. #' \code{resp_subset} requires \code{0} and \code{1}, or alternatively #' \code{FALSE} and \code{TRUE}. \code{resp_cens} requires \code{'left'}, #' \code{'none'}, \code{'right'}, and \code{'interval'} (or equivalently #' \code{-1}, \code{0}, \code{1}, and \code{2}) to indicate left, no, right, #' or interval censoring. 
\code{resp_index} does not make any requirements #' other than the value being unique for each observation. #' @param sigma Logical; Indicates whether the residual standard deviation #' parameter \code{sigma} should be included in addition to the known #' measurement error. Defaults to \code{FALSE} for backwards compatibility, #' but setting it to \code{TRUE} is usually the better choice. #' @param scale Logical; Indicates whether weights should be scaled #' so that the average weight equals one. Defaults to \code{FALSE}. #' @param y2 A vector specifying the upper bounds in interval censoring. #' Will be ignored for non-interval censored observations. However, it #' should NOT be \code{NA} even for non-interval censored observations to #' avoid accidental exclusion of these observations. #' @param lb A numeric vector or single numeric value specifying #' the lower truncation bound. #' @param ub A numeric vector or single numeric value specifying #' the upper truncation bound. #' @param sdy Optional known measurement error of the response #' treated as standard deviation. If specified, handles #' measurement error and (completely) missing values #' at the same time using the plausible-values-technique. #' @param denom A vector of positive numeric values specifying #' the denominator values from which the response rates are computed. #' @param gr A vector of grouping indicators. #' @param ... For \code{resp_vreal}, vectors of real values. #' For \code{resp_vint}, vectors of integer values. In Stan, #' these variables will be named \code{vreal1}, \code{vreal2}, ..., #' and \code{vint1}, \code{vint2}, ..., respectively. #' #' @return A list of additional response information to be processed further #' by \pkg{brms}. #' #' @details #' These functions are almost solely useful when #' called in formulas passed to the \pkg{brms} package. #' Within formulas, the \code{resp_} prefix may be omitted. #' More information is given in the 'Details' section #' of \code{\link{brmsformula}}. #' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}} #' #' @examples #' \dontrun{ #' ## Random effects meta-analysis #' nstudies <- 20 #' true_effects <- rnorm(nstudies, 0.5, 0.2) #' sei <- runif(nstudies, 0.05, 0.3) #' outcomes <- rnorm(nstudies, true_effects, sei) #' data1 <- data.frame(outcomes, sei) #' fit1 <- brm(outcomes | se(sei, sigma = TRUE) ~ 1, #' data = data1) #' summary(fit1) #' #' ## Probit regression using the binomial family #' n <- sample(1:10, 100, TRUE) # number of trials #' success <- rbinom(100, size = n, prob = 0.4) #' x <- rnorm(100) #' data2 <- data.frame(n, success, x) #' fit2 <- brm(success | trials(n) ~ x, data = data2, #' family = binomial("probit")) #' summary(fit2) #' #' ## Survival regression modeling the time between the first #' ## and second recurrence of an infection in kidney patients. 
#' fit3 <- brm(time | cens(censored) ~ age * sex + disease + (1|patient), #' data = kidney, family = lognormal()) #' summary(fit3) #' #' ## Poisson model with truncated counts #' fit4 <- brm(count | trunc(ub = 104) ~ zBase * Trt, #' data = epilepsy, family = poisson()) #' summary(fit4) #' } #' NULL #' @rdname addition-terms #' @export resp_se <- function(x, sigma = FALSE) { se <- deparse(substitute(x)) sigma <- as_one_logical(sigma) class_resp_special( "se", call = match.call(), vars = nlist(se), flags = nlist(sigma) ) } #' @rdname addition-terms #' @export resp_weights <- function(x, scale = FALSE) { weights <- deparse(substitute(x)) scale <- as_one_logical(scale) class_resp_special( "weights", call = match.call(), vars = nlist(weights), flags = nlist(scale) ) } #' @rdname addition-terms #' @export resp_trials <- function(x) { trials <- deparse(substitute(x)) class_resp_special("trials", call = match.call(), vars = nlist(trials)) } #' @rdname addition-terms #' @export resp_thres <- function(x, gr = NA) { thres <- deparse(substitute(x)) gr <- deparse(substitute(gr)) class_resp_special("thres", call = match.call(), vars = nlist(thres, gr)) } #' @rdname addition-terms #' @export resp_cat <- function(x) { # deprecated as of brms 2.10.5 # number of thresholds = number of response categories - 1 thres <- deparse(substitute(x)) str_add(thres) <- " - 1" class_resp_special( "thres", call = match.call(), vars = nlist(thres, gr = "NA") ) } #' @rdname addition-terms #' @export resp_dec <- function(x) { dec <- deparse(substitute(x)) class_resp_special("dec", call = match.call(), vars = nlist(dec)) } #' @rdname addition-terms #' @export resp_cens <- function(x, y2 = NA) { cens <- deparse(substitute(x)) y2 <- deparse(substitute(y2)) class_resp_special("cens", call = match.call(), vars = nlist(cens, y2)) } #' @rdname addition-terms #' @export resp_trunc <- function(lb = -Inf, ub = Inf) { lb <- deparse(substitute(lb)) ub <- deparse(substitute(ub)) class_resp_special("trunc", call = match.call(), vars = nlist(lb, ub)) } #' @rdname addition-terms #' @export resp_mi <- function(sdy = NA) { sdy <- deparse(substitute(sdy)) class_resp_special("mi", call = match.call(), vars = nlist(sdy)) } #' @rdname addition-terms #' @export resp_index <- function(x) { index <- deparse(substitute(x)) class_resp_special("index", call = match.call(), vars = nlist(index)) } #' @rdname addition-terms #' @export resp_rate <- function(denom) { denom <- deparse(substitute(denom)) class_resp_special("rate", call = match.call(), vars = nlist(denom)) } #' @rdname addition-terms #' @export resp_subset <- function(x) { subset <- deparse(substitute(x)) class_resp_special("subset", call = match.call(), vars = nlist(subset)) } #' @rdname addition-terms #' @export resp_vreal <- function(...) { vars <- as.list(substitute(list(...)))[-1] class_resp_special("vreal", call = match.call(), vars = vars) } #' @rdname addition-terms #' @export resp_vint <- function(...) 
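# collects one or more integer-valued vectors for use in custom families;
# in the generated Stan code they are exposed as vint1, vint2, ...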
{ vars <- as.list(substitute(list(...)))[-1] class_resp_special("vint", call = match.call(), vars = vars) } # class underlying response addition terms # @param type type of the addition term # @param call the call to the original addition term function # @param vars named list of unevaluated variables # @param flags named list of (evaluated) logical indicators class_resp_special <- function(type, call, vars = list(), flags = list()) { type <- as_one_character(type) stopifnot(is.call(call), is.list(vars), is.list(flags)) label <- deparse(call) out <- nlist(type, call, label, vars, flags) class(out) <- c("resp_special") out } # computes data for addition arguments eval_rhs <- function(formula, data = NULL) { formula <- as.formula(formula) eval(rhs(formula)[[2]], data, environment(formula)) } # get expression for a variable of an addition term # @param x list with potential $adforms elements # @param ad name of the addition term # @param target name of the element to extract # @type type of the element to extract # @return a character string or NULL get_ad_expr <- function(x, ad, name, type = "vars") { ad <- as_one_character(ad) name <- as_one_character(name) type <- as_one_character(type) if (is.null(x$adforms[[ad]])) { return(NULL) } out <- eval_rhs(x$adforms[[ad]])[[type]][[name]] if (type == "vars" && is_equal(out, "NA")) { out <- NULL } out } # get values of a variable used in an addition term # @return a vector of values or NULL get_ad_values <- function(x, ad, name, data) { expr <- get_ad_expr(x, ad, name, type = "vars") eval2(expr, data) } # get a flag used in an addition term # @return TRUE or FALSE get_ad_flag <- function(x, ad, name) { expr <- get_ad_expr(x, ad, name, type = "flags") as_one_logical(eval2(expr)) } # get variable names used in addition terms get_ad_vars <- function(x, ...) { UseMethod("get_ad_vars") } #' @export get_ad_vars.brmsterms <- function(x, ad, ...) { ad <- as_one_character(ad) all_vars(x$adforms[[ad]]) } #' @export get_ad_vars.mvbrmsterms <- function(x, ad, ...) { unique(ulapply(x$terms, get_ad_vars, ad = ad, ...)) } # coerce censored values into the right format # @param x vector of censoring indicators # @return transformed vector of censoring indicators prepare_cens <- function(x) { .prepare_cens <- function(x) { stopifnot(length(x) == 1L) regx <- paste0("^", x) if (grepl(regx, "left")) { x <- -1 } else if (grepl(regx, "none") || isFALSE(x)) { x <- 0 } else if (grepl(regx, "right") || isTRUE(x)) { x <- 1 } else if (grepl(regx, "interval")) { x <- 2 } return(x) } x <- unname(x) if (is.factor(x)) { x <- as.character(x) } ulapply(x, .prepare_cens) } # extract information on censoring of the response variable # @return vector of censoring indicators or NULL in case of no censoring get_cens <- function(bterms, data, resp = NULL) { if (!is.null(resp)) { bterms <- bterms$terms[[resp]] } out <- NULL if (is.formula(bterms$adforms$cens)) { out <- get_ad_values(bterms, "cens", "cens", data) out <- prepare_cens(out) } out } # extract truncation boundaries # @param bterms a brmsterms object # @param data data.frame containing the truncation variables # @param incl_family include the family in the derivation of the bounds? # @param stan return bounds in form of Stan syntax? # @return a list with elements 'lb' and 'ub' or corresponding Stan code trunc_bounds <- function(bterms, data = NULL, incl_family = FALSE, stan = FALSE, ...) 
{ stopifnot(is.brmsterms(bterms)) if (is.formula(bterms$adforms$trunc)) { trunc <- eval_rhs(bterms$adforms$trunc) } else { trunc <- resp_trunc() } out <- list( lb = eval2(trunc$vars$lb, data), ub = eval2(trunc$vars$ub, data) ) if (incl_family) { family_bounds <- family_bounds(bterms) out$lb <- max(out$lb, family_bounds$lb) out$ub <- min(out$ub, family_bounds$ub) } if (stan) { if (any(out$lb > -Inf | out$ub < Inf)) { tmp <- c( if (out$lb > -Inf) paste0("lower=", out$lb), if (out$ub < Inf) paste0("upper=", out$ub) ) out <- paste0("<", paste0(tmp, collapse = ","), ">") } else { out <- "" } } out } # check if addition argument 'subset' ist used in the model has_subset <- function(bterms) { .has_subset <- function(x) { is.formula(x$adforms$subset) } if (is.brmsterms(bterms)) { out <- .has_subset(bterms) } else if (is.mvbrmsterms(bterms)) { out <- any(ulapply(bterms$terms, .has_subset)) } else { out <- FALSE } out } # construct a list of indices for cross-formula referencing tidy_index <- function(x, data) { out <- .tidy_index(x, data) if (is.brmsterms(x)) { # ensure consistent format for both uni- and multivariate models out <- list(out) names(out)[1] <- terms_resp(x$respform) } out } # internal version of tidy_index .tidy_index <- function(x, ...) { UseMethod(".tidy_index") } #' @export .tidy_index.brmsterms <- function(x, data, ...) { out <- get_ad_values(x, "index", "index", data) if (is.null(out)) { return(NULL) } if (has_subset(x)) { subset <- as.logical(get_ad_values(x, "subset", "subset", data)) out <- out[subset] attr(out, "subset") <- TRUE } if (anyNA(out)) { stop2("NAs are not allowed in 'index' variables.") } if (anyDuplicated(out)) { stop2("Index of response '", names(out), "' contains duplicated values.") } out } #' @export .tidy_index.mvbrmsterms <- function(x, data, ...) { lapply(x$terms, .tidy_index, data = data, ...) } # check if cross-formula referencing is possible in subsetted models check_cross_formula_indexing <- function(bterms) { sp_terms <- ulapply(get_effect(bterms, "sp"), all_terms) me_terms <- get_matches_expr(regex_sp("me"), sp_terms) if (length(me_terms)) { stop2("Cannot use me() terms in subsetted formulas.") } mi_terms <- get_matches_expr(regex_sp("mi"), sp_terms) idx_vars <- lapply(mi_terms, function(x) eval2(x)$idx) if (any(idx_vars == "NA")) { stop2("mi() terms in subsetted formulas require ", "the 'idx' argument to be specified.") } invisible(TRUE) } brms/R/formula-gp.R0000644000175000017500000002717414111751666013767 0ustar nileshnilesh# R helper functions for Gaussian Processes #' Set up Gaussian process terms in \pkg{brms} #' #' Set up a Gaussian process (GP) term in \pkg{brms}. The function does not #' evaluate its arguments -- it exists purely to help set up a model with #' GP terms. #' #' @param ... One or more predictors for the GP. #' @param by A numeric or factor variable of the same length as #' each predictor. In the numeric vector case, the elements multiply #' the values returned by the GP. In the factor variable #' case, a separate GP is fitted for each factor level. #' @param k Optional number of basis functions for computing approximate #' GPs. If \code{NA} (the default), exact GPs are computed. #' @param cov Name of the covariance kernel. By default, #' the exponentiated-quadratic kernel \code{"exp_quad"} is used. #' @param iso A flag to indicate whether an isotropic (\code{TRUE}; the #' default) of a non-isotropic GP should be used. #' In the former case, the same amount of smoothing is applied to all #' predictors. 
In the latter case, predictors may have different smoothing. #' Ignored if only a single predictor is supplied. #' @param gr Logical; Indicates if auto-grouping should be used (defaults #' to \code{TRUE}). If enabled, observations sharing the same #' predictor values will be represented by the same latent variable #' in the GP. This will improve sampling efficiency #' drastically if the number of unique predictor combinations is small #' relative to the number of observations. #' @param cmc Logical; Only relevant if \code{by} is a factor. If \code{TRUE} #' (the default), cell-mean coding is used for the \code{by}-factor, that is #' one GP per level is estimated. If \code{FALSE}, contrast GPs are estimated #' according to the contrasts set for the \code{by}-factor. #' @param scale Logical; If \code{TRUE} (the default), predictors are #' scaled so that the maximum Euclidean distance between two points #' is 1. This often improves sampling speed and convergence. #' Scaling also affects the estimated length-scale parameters #' in that they resemble those of scaled predictors (not of the original #' predictors) if \code{scale} is \code{TRUE}. #' @param c Numeric value only used in approximate GPs. Defines the #' multiplicative constant of the predictors' range over which #' predictions should be computed. A good default could be \code{c = 5/4} #' but we are still working on providing better recommendations. #' #' @details A GP is a stochastic process, which #' describes the relation between one or more predictors #' \eqn{x = (x_1, ..., x_d)} and a response \eqn{f(x)}, where #' \eqn{d} is the number of predictors. A GP is the #' generalization of the multivariate normal distribution #' to an infinite number of dimensions. Thus, it can be #' interpreted as a prior over functions. Any finite sample #' realized from this stochastic process is jointly multivariate #' normal, with a covariance matrix defined by the covariance #' kernel \eqn{k_p(x)}, where \eqn{p} is the vector of parameters #' of the GP: #' \deqn{f(x) ~ MVN(0, k_p(x))} #' The smoothness and general behavior of the function \eqn{f} #' depends only on the choice of covariance kernel. #' For a more detailed introduction to Gaussian processes, #' see \url{https://en.wikipedia.org/wiki/Gaussian_process}. #' #' Below, we describe the currently supported covariance kernels: #' \itemize{ #' \item{"exp_quad": }{The exponentiated-quadratic kernel is defined as #' \eqn{k(x_i, x_j) = sdgp^2 exp(- || x_i - x_j ||^2 / (2 lscale^2))}, #' where \eqn{|| . ||} is the Euclidean norm, \eqn{sdgp} is a #' standard deviation parameter, and \eqn{lscale} is a characteristic #' length-scale parameter. The latter practically measures how close two #' points \eqn{x_i} and \eqn{x_j} have to be to influence each other #' substantially.} #' } #' #' In the current implementation, \code{"exp_quad"} is the only supported #' covariance kernel. More options will follow in the future. #' #' @return An object of class \code{'gp_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}.
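#' #' Multiple GP terms can be combined within the same formula, e.g. #' \code{y ~ gp(x1) + gp(x2, by = g)}. If \code{k} is specified, an #' approximate GP with \eqn{k^d} basis functions (where \eqn{d} is the #' number of predictors) is used instead of an exact GP; in that case, #' \code{c} has to be specified as well.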
#' #' @examples #' \dontrun{ #' # simulate data using the mgcv package #' dat <- mgcv::gamSim(1, n = 30, scale = 2) #' #' # fit a simple GP model #' fit1 <- brm(y ~ gp(x2), dat, chains = 2) #' summary(fit1) #' me1 <- conditional_effects(fit1, ndraws = 200, spaghetti = TRUE) #' plot(me1, ask = FALSE, points = TRUE) #' #' # fit a more complicated GP model #' fit2 <- brm(y ~ gp(x0) + x1 + gp(x2) + x3, dat, chains = 2) #' summary(fit2) #' me2 <- conditional_effects(fit2, ndraws = 200, spaghetti = TRUE) #' plot(me2, ask = FALSE, points = TRUE) #' #' # fit a multivariate GP model #' fit3 <- brm(y ~ gp(x1, x2), dat, chains = 2) #' summary(fit3) #' me3 <- conditional_effects(fit3, ndraws = 200, spaghetti = TRUE) #' plot(me3, ask = FALSE, points = TRUE) #' #' # compare model fit #' LOO(fit1, fit2, fit3) #' #' # simulate data with a factor covariate #' dat2 <- mgcv::gamSim(4, n = 90, scale = 2) #' #' # fit separate gaussian processes for different levels of 'fac' #' fit4 <- brm(y ~ gp(x2, by = fac), dat2, chains = 2) #' summary(fit4) #' plot(conditional_effects(fit4), points = TRUE) #' } #' #' @seealso \code{\link{brmsformula}} #' @export gp <- function(..., by = NA, k = NA, cov = "exp_quad", iso = TRUE, gr = TRUE, cmc = TRUE, scale = TRUE, c = NULL) { cov <- match.arg(cov, choices = c("exp_quad")) call <- match.call() label <- deparse(call) vars <- as.list(substitute(list(...)))[-1] by <- deparse(substitute(by)) cmc <- as_one_logical(cmc) if (is.null(call[["gr"]]) && require_old_default("2.12.8")) { # the default of 'gr' has changed in version 2.12.8 gr <- FALSE } else { gr <- as_one_logical(gr) } if (length(vars) > 1L) { iso <- as_one_logical(iso) } else { iso <- TRUE } if (!isNA(k)) { k <- as.integer(as_one_numeric(k)) if (k < 1L) { stop2("'k' must be positive.") } if (is.null(c)) { stop2( "'c' must be specified for approximate GPs. ", "A good default could be c = 5/4 but we are still ", "working on providing better recommendations." 
) } c <- as.numeric(c) if (length(c) == 1L) { c <- rep(c, length(vars)) } if (length(c) != length(vars)) { stop2("'c' must be of the same length as the number of covariates.") } if (any(c <= 0)) { stop2("'c' must be positive.") } } else { c <- NA } scale <- as_one_logical(scale) term <- ulapply(vars, deparse, backtick = TRUE, width.cutoff = 500) out <- nlist(term, label, by, cov, k, iso, gr, cmc, scale, c) structure(out, class = "gp_term") } # get labels of gaussian process terms # @param x either a formula or a list containing an element "gp" # @param data data frame containing the covariates # @return a data.frame with one row per GP term tidy_gpef <- function(x, data) { if (is.formula(x)) { x <- brmsterms(x, check_response = FALSE)$dpars$mu } form <- x[["gp"]] if (!is.formula(form)) { return(empty_data_frame()) } out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- nrow(out) out$cons <- out$byvars <- out$covars <- out$sfx1 <- out$sfx2 <- out$c <- vector("list", nterms) for (i in seq_len(nterms)) { gp <- eval2(out$term[i]) out$label[i] <- paste0("gp", rename(collapse(gp$term))) out$cov[i] <- gp$cov out$k[i] <- gp$k out$c[[i]] <- gp$c out$iso[i] <- gp$iso out$cmc[i] <- gp$cmc out$gr[i] <- gp$gr out$scale[i] <- gp$scale out$covars[[i]] <- gp$term if (gp$by != "NA") { out$byvars[[i]] <- gp$by str_add(out$label[i]) <- rename(gp$by) byval <- get(gp$by, data) if (is_like_factor(byval)) { byval <- unique(as.factor(byval)) byform <- str2formula(c(ifelse(gp$cmc, "0", "1"), "byval")) cons <- rename(colnames(model.matrix(byform))) out$cons[[i]] <- rm_wsp(sub("^byval", "", cons)) } } # sfx1 is for sdgp and sfx2 is for lscale out$sfx1[[i]] <- paste0(out$label[i], out$cons[[i]]) if (out$iso[i]) { out$sfx2[[i]] <- matrix(out$sfx1[[i]]) } else { out$sfx2[[i]] <- outer(out$sfx1[[i]], out$covars[[i]], paste0) } } out } # exponential-quadratic covariance matrix # not vectorized over parameter values cov_exp_quad <- function(x, x_new = NULL, sdgp = 1, lscale = 1) { sdgp <- as.numeric(sdgp) lscale <- as.numeric(lscale) Dls <- length(lscale) if (Dls == 1L) { # one dimensional or isotropic GP diff_quad <- diff_quad(x = x, x_new = x_new) out <- sdgp^2 * exp(-diff_quad / (2 * lscale^2)) } else { # multi-dimensional non-isotropic GP diff_quad <- diff_quad(x = x[, 1], x_new = x_new[, 1]) out <- sdgp^2 * exp(-diff_quad / (2 * lscale[1]^2)) for (d in seq_len(Dls)[-1]) { diff_quad <- diff_quad(x = x[, d], x_new = x_new[, d]) out <- out * exp(-diff_quad / (2 * lscale[d]^2)) } } out } # compute squared differences # @param x vector or matrix # @param x_new optional vector of matrix with the same ncol as x # @return an nrow(x) times nrow(x_new) matrix # @details if matrices are passed results are summed over the columns diff_quad <- function(x, x_new = NULL) { x <- as.matrix(x) if (is.null(x_new)) { x_new <- x } else { x_new <- as.matrix(x_new) } .diff_quad <- function(x1, x2) (x1 - x2)^2 out <- 0 for (i in seq_cols(x)) { out <- out + outer(x[, i], x_new[, i], .diff_quad) } out } # spectral density function # vectorized over parameter values spd_cov_exp_quad <- function(x, sdgp = 1, lscale = 1) { NB <- NROW(x) D <- NCOL(x) Dls <- NCOL(lscale) out <- matrix(nrow = length(sdgp), ncol = NB) if (Dls == 1L) { # one dimensional or isotropic GP constant <- sdgp^2 * (sqrt(2 * pi) * lscale)^D neg_half_lscale2 <- -0.5 * lscale^2 for (m in seq_len(NB)) { out[, m] <- constant * exp(neg_half_lscale2 * sum(x[m, ]^2)) } } else { # multi-dimensional non-isotropic GP constant <- sdgp^2 * sqrt(2 * pi)^D * 
matrixStats::rowProds(lscale) neg_half_lscale2 = -0.5 * lscale^2 for (m in seq_len(NB)) { x2 <- data2draws(x[m, ]^2, dim = dim(lscale)) out[, m] <- constant * exp(rowSums(neg_half_lscale2 * x2)) } } out } # compute the mth eigen value of an approximate GP eigen_val_cov_exp_quad <- function(m, L) { ((m * pi) / (2 * L))^2 } # compute the mth eigen function of an approximate GP eigen_fun_cov_exp_quad <- function(x, m, L) { x <- as.matrix(x) D <- ncol(x) stopifnot(length(m) == D, length(L) == D) out <- vector("list", D) for (i in seq_cols(x)) { out[[i]] <- 1 / sqrt(L[i]) * sin((m[i] * pi) / (2 * L[i]) * (x[, i] + L[i])) } Reduce("*", out) } # extended range of input data for which predictions should be made choose_L <- function(x, c) { if (!length(x)) { range <- 1 } else { range <- max(1, max(x, na.rm = TRUE) - min(x, na.rm = TRUE)) } c * range } # try to evaluate a GP term and # return an informative error message if it fails try_nug <- function(expr, nug) { out <- try(expr, silent = TRUE) if (is(out, "try-error")) { stop2("The Gaussian process covariance matrix is not positive ", "definite.\nThis occurs for numerical reasons. Setting ", "'nug' above ", nug, " may help.") } out } brms/R/brms-package.R0000644000175000017500000001002514105230573014225 0ustar nileshnilesh#' Bayesian Regression Models using 'Stan' #' #' @docType package #' @name brms-package #' @aliases brms #' #' @description #' \if{html}{ #' \figure{stanlogo.png}{options: width="50px" alt="https://mc-stan.org/about/logo/"} #' \emph{Stan Development Team} #' } #' #' The \pkg{brms} package provides an interface to fit Bayesian generalized #' multivariate (non-)linear multilevel models using \pkg{Stan}, which is a C++ #' package for obtaining full Bayesian inference (see #' \url{https://mc-stan.org/}). The formula syntax is an extended version of the #' syntax applied in the \pkg{lme4} package to provide a familiar and simple #' interface for performing regression analyses. #' #' @details #' The main function of \pkg{brms} is \code{\link{brm}}, which uses #' formula syntax to specify a wide range of complex Bayesian models #' (see \code{\link{brmsformula}} for details). Based on the supplied #' formulas, data, and additional information, it writes the Stan code #' on the fly via \code{\link{make_stancode}}, prepares the data via #' \code{\link{make_standata}}, and fits the model using #' \pkg{\link[rstan:rstan]{Stan}}. #' #' Subsequently, a large number of post-processing methods can be applied: #' To get an overview on the estimated parameters, #' \code{\link[brms:summary.brmsfit]{summary}} or #' \code{\link[brms:conditional_effects.brmsfit]{conditional_effects}} #' are perfectly suited. Detailed visual analyses can be performed by applying #' the \code{\link{pp_check}} and \code{\link{stanplot}} methods, which both #' rely on the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} package. #' Model comparisons can be done via \code{\link{loo}} and \code{\link{waic}}, #' which make use of the \pkg{\link[loo:loo-package]{loo}} package as well as #' via \code{\link{bayes_factor}} which relies on the \pkg{bridgesampling} package. #' For a full list of methods to apply, type \code{methods(class = "brmsfit")}. #' #' Because \pkg{brms} is based on \pkg{Stan}, a C++ compiler is required. The #' program Rtools (available on #' \url{https://cran.r-project.org/bin/windows/Rtools/}) comes with a C++ #' compiler for Windows. On Mac, you should use Xcode. 
For further instructions #' on how to get the compilers running, see the prerequisites section at the #' \href{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}{RStan-Getting-Started} #' page. #' #' When comparing other packages fitting multilevel models to \pkg{brms}, keep #' in mind that the latter needs to compile models before actually fitting them, #' which will require between 20 and 40 seconds depending on your machine, #' operating system and overall model complexity. #' #' Thus, fitting smaller models may be relatively slow as compilation time makes #' up the majority of the whole running time. For larger / more complex #' models however, fitting my take several minutes or even hours, so that the #' compilation time won't make much of a difference for these models. #' #' See \code{vignette("brms_overview")} and \code{vignette("brms_multilevel")} #' for a general introduction and overview of \pkg{brms}. For a full list of #' available vignettes, type \code{vignette(package = "brms")}. #' #' @references #' Paul-Christian Buerkner (2017). brms: An R Package for Bayesian Multilevel #' Models Using Stan. \emph{Journal of Statistical Software}, 80(1), 1-28. #' \code{doi:10.18637/jss.v080.i01} #' #' Paul-Christian Buerkner (2018). Advanced Bayesian Multilevel Modeling #' with the R Package brms. \emph{The R Journal}. 10(1), 395–411. #' \code{doi:10.32614/RJ-2018-017} #' #' The Stan Development Team. \emph{Stan Modeling Language User's Guide and #' Reference Manual}. \url{https://mc-stan.org/users/documentation/}. #' #' Stan Development Team (2020). RStan: the R interface to Stan. R package #' version 2.21.2. \url{https://mc-stan.org/} #' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{brmsfamily}}, #' \code{\link{brmsfit}} #' NULL brms/R/formula-ac.R0000644000175000017500000005023014140466622013726 0ustar nileshnilesh#' Autocorrelation structures #' #' Specify autocorrelation terms in \pkg{brms} models. Currently supported terms #' are \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, #' \code{\link{cosy}}, \code{\link{sar}}, \code{\link{car}}, and #' \code{\link{fcor}}. Terms can be directly specified within the formula, or #' passed to the \code{autocor} argument of \code{\link{brmsformula}} in the #' form of a one-sided formula. For deprecated ways of specifying #' autocorrelation terms, see \code{\link{cor_brms}}. #' #' @name autocor-terms #' #' @details The autocor term functions are almost solely useful when called in #' formulas passed to the \pkg{brms} package. They do not evaluate its #' arguments -- but exist purely to help set up a model with autocorrelation #' terms. #' #' @seealso \code{\link{brmsformula}}, \code{\link{acformula}}, #' \code{\link{arma}}, \code{\link{ar}}, \code{\link{ma}}, #' \code{\link{cosy}}, \code{\link{sar}}, \code{\link{car}}, #' \code{\link{fcor}} #' #' @examples #' # specify autocor terms within the formula #' y ~ x + arma(p = 1, q = 1) + car(M) #' #' # specify autocor terms in the 'autocor' argument #' bf(y ~ x, autocor = ~ arma(p = 1, q = 1) + car(M)) #' #' # specify autocor terms via 'acformula' #' bf(y ~ x) + acformula(~ arma(p = 1, q = 1) + car(M)) NULL #' Set up ARMA(p,q) correlation structures #' #' Set up an autoregressive moving average (ARMA) term of order (p, q) in #' \pkg{brms}. The function does not evaluate its arguments -- it exists purely #' to help set up a model with ARMA terms. #' #' @param time An optional time variable specifying the time ordering #' of the observations. 
By default, the existing order of the observations #' in the data is used. #' @param gr An optional grouping variable. If specified, the correlation #' structure is assumed to apply only to observations within the same grouping #' level. #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is \code{1}. #' @param q A non-negative integer specifying the moving average (MA) #' order of the ARMA structure. Default is \code{1}. #' @param cov A flag indicating whether ARMA effects should be estimated by #' means of residual covariance matrices. This is currently only possible for #' stationary ARMA effects of order 1. If the model family does not have #' natural residuals, latent residuals are added automatically. If #' \code{FALSE} (the default), a regression formulation is used that is #' considerably faster and allows for ARMA effects of order higher than 1 but #' is only available for \code{gaussian} models and some of its #' generalizations. #' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}}, \code{\link{ar}}, \code{\link{ma}}, #' #' @examples #' \dontrun{ #' data("LakeHuron") #' LakeHuron <- as.data.frame(LakeHuron) #' fit <- brm(x ~ arma(p = 2, q = 1), data = LakeHuron) #' summary(fit) #' } #' #' @export arma <- function(time = NA, gr = NA, p = 1, q = 1, cov = FALSE) { label <- deparse(match.call()) time <- deparse(substitute(time)) gr <- deparse(substitute(gr)) .arma(time = time, gr = gr, p = p, q = q, cov = cov, label = label) } #' Set up AR(p) correlation structures #' #' Set up an autoregressive (AR) term of order p in \pkg{brms}. The function #' does not evaluate its arguments -- it exists purely to help set up a model #' with AR terms. #' #' @inheritParams arma #' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ma}} #' #' @examples #' \dontrun{ #' data("LakeHuron") #' LakeHuron <- as.data.frame(LakeHuron) #' fit <- brm(x ~ ar(p = 2), data = LakeHuron) #' summary(fit) #' } #' #' @export ar <- function(time = NA, gr = NA, p = 1, cov = FALSE) { label <- deparse(match.call()) time <- deparse(substitute(time)) gr <- deparse(substitute(gr)) .arma(time = time, gr = gr, p = p, q = 0, cov = cov, label = label) } #' Set up MA(q) correlation structures #' #' Set up a moving average (MA) term of order q in \pkg{brms}. The function does #' not evaluate its arguments -- it exists purely to help set up a model with #' MA terms. #' #' @inheritParams arma #' #' @return An object of class \code{'arma_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. 
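#' Internally, an MA(q) term is handled as an ARMA(0, q) term, so the #' covariance formulation (\code{cov = TRUE}) is only available for #' \code{q = 1}.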
#' #' @seealso \code{\link{autocor-terms}}, \code{\link{arma}}, \code{\link{ar}} #' #' @examples #' \dontrun{ #' data("LakeHuron") #' LakeHuron <- as.data.frame(LakeHuron) #' fit <- brm(x ~ ma(q = 2), data = LakeHuron) #' summary(fit) #' } #' #' @export ma <- function(time = NA, gr = NA, q = 1, cov = FALSE) { label <- deparse(match.call()) time <- deparse(substitute(time)) gr <- deparse(substitute(gr)) .arma(time = time, gr = gr, p = 0, q = q, cov = cov, label = label) } # helper function to validate input to arma() .arma <- function(time, gr, p, q, cov, label) { time <- as_one_variable(time) gr <- as_one_character(gr) stopif_illegal_group(gr) p <- as_one_numeric(p) q <- as_one_numeric(q) if (!(p >= 0 && is_wholenumber(p))) { stop2("Autoregressive order must be a non-negative integer.") } if (!(q >= 0 && is_wholenumber(q))) { stop2("Moving-average order must be a non-negative integer.") } if (!sum(p, q)) { stop2("At least one of 'p' and 'q' should be greater zero.") } cov <- as_one_logical(cov) if (cov && (p > 1 || q > 1)) { stop2("Covariance formulation of ARMA structures is ", "only possible for effects of maximal order one.") } label <- as_one_character(label) out <- nlist(time, gr, p, q, cov, label) class(out) <- c("arma_term", "ac_term") out } #' Set up COSY correlation structures #' #' Set up a compound symmetry (COSY) term in \pkg{brms}. The function does #' not evaluate its arguments -- it exists purely to help set up a model with #' COSY terms. #' #' @inheritParams arma #' #' @return An object of class \code{'cosy_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' data("lh") #' lh <- as.data.frame(lh) #' fit <- brm(x ~ cosy(), data = lh) #' summary(fit) #' } #' #' @export cosy <- function(time = NA, gr = NA) { label <- deparse(match.call()) time <- deparse(substitute(time)) time <- as_one_variable(time) gr <- deparse(substitute(gr)) stopif_illegal_group(gr) out <- nlist(time, gr, label) class(out) <- c("cosy_term", "ac_term") out } #' Spatial simultaneous autoregressive (SAR) structures #' #' Set up a spatial simultaneous autoregressive (SAR) term in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up a #' model with SAR terms. #' #' @param M An object specifying the spatial weighting matrix. #' Can be either the spatial weight matrix itself or an #' object of class \code{listw} or \code{nb}, from which #' the spatial weighting matrix can be computed. #' @param type Type of the SAR structure. Either \code{"lag"} #' (for SAR of the response values) or \code{"error"} #' (for SAR of the residuals). More information is #' provided in the 'Details' section. #' #' @details The \code{lagsar} structure implements SAR of the response values: #' \deqn{y = \rho W y + \eta + e} #' The \code{errorsar} structure implements SAR of the residuals: #' \deqn{y = \eta + u, u = \rho W u + e} #' In the above equations, \eqn{\eta} is the predictor term and \eqn{e} are #' independent normally or t-distributed residuals. Currently, only families #' \code{gaussian} and \code{student} support SAR structures. #' #' @return An object of class \code{'sar_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}.
#' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' data(oldcol, package = "spdep") #' fit1 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "lag"), #' data = COL.OLD, data2 = list(COL.nb = COL.nb), #' chains = 2, cores = 2) #' summary(fit1) #' plot(fit1) #' #' fit2 <- brm(CRIME ~ INC + HOVAL + sar(COL.nb, type = "error"), #' data = COL.OLD, data2 = list(COL.nb = COL.nb), #' chains = 2, cores = 2) #' summary(fit2) #' plot(fit2) #' } #' #' @export sar <- function(M, type = "lag") { label <- deparse(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in sar().") } M <- deparse(substitute(M)) M <- as_one_variable(M) options <- c("lag", "error") type <- match.arg(type, options) out <- nlist(M, type, label) class(out) <- c("sar_term", "ac_term") out } #' Spatial conditional autoregressive (CAR) structures #' #' Set up an spatial conditional autoregressive (CAR) term in \pkg{brms}. The #' function does not evaluate its arguments -- it exists purely to help set up a #' model with CAR terms. #' #' @param M Adjacency matrix of locations. All non-zero entries are treated as #' if the two locations are adjacent. If \code{gr} is specified, the row names #' of \code{M} have to match the levels of the grouping factor. #' @param gr An optional grouping factor mapping observations to spatial #' locations. If not specified, each observation is treated as a separate #' location. It is recommended to always specify a grouping factor to allow #' for handling of new data in post-processing methods. #' @param type Type of the CAR structure. Currently implemented are #' \code{"escar"} (exact sparse CAR), \code{"esicar"} (exact sparse intrinsic #' CAR), \code{"icar"} (intrinsic CAR), and \code{"bym2"}. More information is #' provided in the 'Details' section. #' #' @return An object of class \code{'car_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @details The \code{escar} and \code{esicar} types are #' implemented based on the case study of Max Joseph #' (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and #' \code{bym2} type is implemented based on the case study of Mitzi Morris #' (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). 
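#' The adjacency matrix \code{M} is passed to the model via the \code{data2} #' argument of \code{\link{brm}}, as shown in the examples below.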
#' #' @examples #' \dontrun{ #' # generate some spatial data #' east <- north <- 1:10 #' Grid <- expand.grid(east, north) #' K <- nrow(Grid) #' #' # set up distance and neighbourhood matrices #' distance <- as.matrix(dist(Grid)) #' W <- array(0, c(K, K)) #' W[distance == 1] <- 1 #' #' # generate the covariates and response data #' x1 <- rnorm(K) #' x2 <- rnorm(K) #' theta <- rnorm(K, sd = 0.05) #' phi <- rmulti_normal( #' 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) #' ) #' eta <- x1 + x2 + phi #' prob <- exp(eta) / (1 + exp(eta)) #' size <- rep(50, K) #' y <- rbinom(n = K, size = size, prob = prob) #' dat <- data.frame(y, size, x1, x2) #' #' # fit a CAR model #' fit <- brm(y | trials(size) ~ x1 + x2 + car(W), #' data = dat, data2 = list(W = W), #' family = binomial()) #' summary(fit) #' } #' #' @export car <- function(M, gr = NA, type = "escar") { label <- deparse(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in car().") } M <- deparse(substitute(M)) M <- as_one_variable(M) gr <- deparse(substitute(gr)) stopif_illegal_group(gr) options <- c("escar", "esicar", "icar", "bym2") type <- match.arg(type, options) out <- nlist(M, gr, type, label) class(out) <- c("car_term", "ac_term") out } #' Fixed residual correlation (FCOR) structures #' #' Set up a fixed residual correlation (FCOR) term in \pkg{brms}. The function #' does not evaluate its arguments -- it exists purely to help set up a model #' with FCOR terms. #' #' @param M Known correlation/covariance matrix of the response variable. #' If a vector is passed, it will be used as diagonal entries #' (variances) and correlations/covariances will be set to zero. #' The actual covariance matrix used in the likelihood is obtained #' by multiplying \code{M} by the square of the residual standard #' deviation parameter \code{sigma} estimated as part of the model. #' #' @return An object of class \code{'fcor_term'}, which is a list #' of arguments to be interpreted by the formula #' parsing functions of \pkg{brms}. #' #' @seealso \code{\link{autocor-terms}} #' #' @examples #' \dontrun{ #' dat <- data.frame(y = rnorm(3)) #' V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) #' fit <- brm(y ~ 1 + fcor(V), data = dat, data2 = list(V = V)) #' } #' #' @export fcor <- function(M) { label <- deparse(match.call()) if (missing(M)) { stop2("Argument 'M' is missing in fcor().") } M <- deparse(substitute(M)) M <- as_one_variable(M) out <- nlist(M, label) class(out) <- c("fcor_term", "ac_term") out } # validate 'autocor' argument validate_autocor <- function(autocor) { if (is.null(autocor) || is.cor_empty(autocor)) { return(NULL) } if (is.cor_brms(autocor)) { warning2("Using 'cor_brms' objects for 'autocor' is deprecated. ", "Please see ?cor_brms for details.") autocor <- as_formula_cor_brms(autocor) } if (is.null(autocor)) { return(NULL) } autocor <- as.formula(autocor) att <- attributes(autocor) autocor <- terms_ac(autocor) if (!is.null(autocor) && !is.formula(autocor)) { stop2("Argument 'autocor' must be coercible to a formula.") } attributes(autocor)[names(att)] <- att autocor } # gather information on autocor terms # @return a data.frame with one row per autocor term tidy_acef <- function(x, ...) { UseMethod("tidy_acef") } #' @export tidy_acef.default <- function(x, ...) { x <- brmsterms(x, check_response = FALSE) tidy_acef(x, ...) } #' @export tidy_acef.mvbrmsterms <- function(x, ...) { out <- lapply(x$terms, tidy_acef, ...) 
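# combine the per-response results into a single 'acef' data frame; each row # describes one autocorrelation term via columns such as 'class', 'dim', # 'time', 'gr', 'p', 'q', and 'M' (see 'tidy_acef.btl' below)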
out <- do_call(rbind, out) structure(out, class = acef_class()) } #' @export tidy_acef.brmsterms <- function(x, ...) { out <- lapply(x$dpars, tidy_acef, ...) out <- do_call(rbind, out) if (!NROW(out)) { return(empty_acef()) } out <- structure(out, class = acef_class()) if (has_ac_class(out, "sar")) { if (any(c("sigma", "nu") %in% names(x$dpars))) { stop2("SAR models are not implemented when predicting 'sigma' or 'nu'.") } } if (use_ac_cov(out)) { if (isTRUE(x$rescor)) { stop2("Explicit covariance terms cannot be modeled ", "when 'rescor' is estimated at the same time.") } } out } #' @export tidy_acef.btl <- function(x, data = NULL, ...) { form <- x[["ac"]] if (!is.formula(form)) { return(empty_acef()) } if (is.mixfamily(x$family)) { stop2("Autocorrelation terms cannot be applied in mixture models.") } px <- check_prefix(x) out <- data.frame(term = all_terms(form), stringsAsFactors = FALSE) nterms <- NROW(out) cnames <- c("class", "dim", "type", "time", "gr", "p", "q", "M") out[cnames] <- list(NA) out$cov <- out$nat_cov <- FALSE out[names(px)] <- px for (i in seq_len(nterms)) { ac <- eval2(out$term[i]) if (is.arma_term(ac)) { out$class[i] <- "arma" out$dim[i] <- "time" out$time[i] <- ac$time out$gr[i] <- ac$gr out$p[i] <- ac$p out$q[i] <- ac$q out$cov[i] <- ac$cov } if (is.cosy_term(ac)) { out$class[i] <- "cosy" out$dim[i] <- "time" out$time[i] <- ac$time out$gr[i] <- ac$gr out$cov[i] <- TRUE } if (is.sar_term(ac)) { out$class[i] <- "sar" out$dim[i] <- "space" out$type[i] <- ac$type out$M[i] <- ac$M out$cov[i] <- TRUE } if (is.car_term(ac)) { out$class[i] <- "car" out$dim[i] <- "space" out$type[i] <- ac$type out$gr[i] <- ac$gr out$M[i] <- ac$M } if (is.fcor_term(ac)) { out$class[i] <- "fcor" out$M[i] <- ac$M out$cov[i] <- TRUE } } # covariance matrices of natural residuals will be handled # directly in the likelihood function while latent residuals will # be added to the linear predictor of the main parameter 'mu' out$nat_cov <- out$cov & has_natural_residuals(x) class(out) <- acef_class() # validate specified autocor terms if (any(duplicated(out$class))) { stop2("Can only model one term per autocorrelation class.") } if (NROW(subset2(out, dim = "time")) > 1) { stop2("Can only model one time-series term.") } if (NROW(subset2(out, dim = "space")) > 1) { stop2("Can only model one spatial term.") } if (NROW(subset2(out, nat_cov = TRUE)) > 1) { stop2("Can only model one covariance matrix of natural residuals.") } if (use_ac_cov(out) || has_ac_class(out, "arma")) { if (any(!out$dpar %in% c("", "mu") | nzchar(out$nlpar))) { stop2("Explicit covariance terms can only be specified on 'mu'.") } } out } #' @export tidy_acef.btnl <- function(x, ... ) { tidy_acef.btl(x, ...) } #' @export tidy_acef.acef <- function(x, ...) { x } #' @export tidy_acef.NULL <- function(x, ...) { empty_acef() } empty_acef <- function() { structure(empty_data_frame(), class = acef_class()) } acef_class <- function() { c("acef", "data.frame") } # get names of certain autocor variables get_ac_vars <- function(x, var, ...) { var <- match.arg(var, c("time", "gr", "M")) acef <- subset2(tidy_acef(x), ...) out <- unique(acef[[var]]) setdiff(na.omit(out), "NA") } # get names of autocor grouping variables get_ac_groups <- function(x, ...) { get_ac_vars(x, "gr", ...) } # is certain subset of autocor terms is present? has_ac_subset <- function(x, ...) { NROW(subset2(tidy_acef(x), ...)) > 0L } # is a certain autocorrelation class present? 
has_ac_class <- function(x, class) { has_ac_subset(x, class = class) } # use explicit residual covariance structure? use_ac_cov <- function(x) { has_ac_subset(x, cov = TRUE) } # use explicit residual covariance structure for time-series? use_ac_cov_time <- function(x) { has_ac_subset(x, cov = TRUE, dim = "time") } # does the model need latent residuals for autocor structures? has_ac_latent_residuals <- function(bterms) { !has_natural_residuals(bterms) && (use_ac_cov(bterms) || has_ac_class(bterms, "arma")) } # validate SAR matrices validate_sar_matrix <- function(M) { if (is(M, "listw")) { require_package("spdep") M <- spdep::listw2mat(M) } else if (is(M, "nb")) { require_package("spdep") M <- spdep::nb2mat(M) } if (length(dim(M)) != 2L) { stop2("'M' for SAR terms must be of class 'matrix', 'listw', or 'nb'.") } M <- Matrix::Matrix(M, sparse = TRUE) M } # validate CAR matrices validate_car_matrix <- function(M) { if (length(dim(M)) != 2L) { stop2("'M' for CAR terms must be a matrix.") } M <- Matrix::Matrix(M, sparse = TRUE) if (!Matrix::isSymmetric(M, check.attributes = FALSE)) { stop2("'M' for CAR terms must be symmetric.") } colnames(M) <- rownames(M) not_binary <- M@x != 1 if (any(not_binary)) { message("Converting all non-zero values in 'M' to 1.") M@x[not_binary] <- 1 } M } # validate FCOR matrices validate_fcor_matrix <- function(M) { if (length(dim(M)) <= 1L) { M <- diag(as.vector(M), length(M)) } if (length(dim(M)) != 2L) { stop2("'M' for FCOR terms must be a matrix.") } M <- as.matrix(M) if (!isSymmetric(M, check.attributes = FALSE)) { stop2("'M' for FCOR terms must be symmetric.") } if (min(eigen(M)$values <= 0)) { stop2("'M' for FCOR terms must be positive definite.") } M } # regex to extract all parameter names of autocorrelation structures regex_autocor_pars <- function() { p <- c("ar", "ma", "sderr", "cosy", "lagsar", "errorsar", "car", "sdcar", "rhocar") p <- paste0("(", p, ")", collapse = "|") paste0("^(", p, ")(\\[|_|$)") } is.ac_term <- function(x) { inherits(x, "ac_term") } is.arma_term <- function(x) { inherits(x, "arma_term") } is.cosy_term <- function(x) { inherits(x, "cosy_term") } is.sar_term <- function(x) { inherits(x, "sar_term") } is.car_term <- function(x) { inherits(x, "car_term") } is.fcor_term <- function(x) { inherits(x, "fcor_term") } brms/R/conditional_smooths.R0000644000175000017500000001761614144712222015764 0ustar nileshnilesh#' Display Smooth Terms #' #' Display smooth \code{s} and \code{t2} terms of models #' fitted with \pkg{brms}. #' #' @aliases marginal_smooths marginal_smooths.brmsfit #' #' @inheritParams conditional_effects.brmsfit #' @param smooths Optional character vector of smooth terms #' to display. If \code{NULL} (the default) all smooth terms #' are shown. #' @param ndraws Positive integer indicating how many #' posterior draws should be used. #' If \code{NULL} (the default) all draws are used. #' Ignored if \code{draw_ids} is not \code{NULL}. #' @param draw_ids An integer vector specifying #' the posterior draws to be used. #' If \code{NULL} (the default), all draws are used. #' @param nsamples Deprecated alias of \code{ndraws}. #' @param subset Deprecated alias of \code{draw_ids}. #' @param ... Currently ignored. #' #' @return For the \code{brmsfit} method, #' an object of class \code{brms_conditional_effects}. See #' \code{\link{conditional_effects}} for #' more details and documentation of the related plotting function. #' #' @details Two-dimensional smooth terms will be visualized using #' either contour or raster plots. 
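#' To display only a subset of the smooth terms, pass their labels #' (e.g. \code{"s(x2)"}) via the \code{smooths} argument, as shown in the #' examples below.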
#' #' @examples #' \dontrun{ #' set.seed(0) #' dat <- mgcv::gamSim(1, n = 200, scale = 2) #' fit <- brm(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' # show all smooth terms #' plot(conditional_smooths(fit), rug = TRUE, ask = FALSE) #' # show only the smooth term s(x2) #' plot(conditional_smooths(fit, smooths = "s(x2)"), ask = FALSE) #' #' # fit and plot a two-dimensional smooth term #' fit2 <- brm(y ~ t2(x0, x2), data = dat) #' ms <- conditional_smooths(fit2) #' plot(ms, stype = "contour") #' plot(ms, stype = "raster") #' } #' #' @export conditional_smooths.brmsfit <- function(x, smooths = NULL, int_conditions = NULL, prob = 0.95, spaghetti = FALSE, resolution = 100, too_far = 0, ndraws = NULL, draw_ids = NULL, nsamples = NULL, subset = NULL, probs = NULL, ...) { probs <- validate_ci_bounds(prob, probs = probs) spaghetti <- as_one_logical(spaghetti) draw_ids <- use_alias(draw_ids, subset) ndraws <- use_alias(ndraws, nsamples) contains_draws(x) x <- restructure(x) x <- exclude_terms(x, incl_autocor = FALSE) smooths <- rm_wsp(as.character(smooths)) conditions <- prepare_conditions(x) draw_ids <- validate_draw_ids(x, draw_ids, ndraws) bterms <- brmsterms(exclude_terms(x$formula, smooths_only = TRUE)) out <- conditional_smooths( bterms, fit = x, smooths = smooths, conditions = conditions, int_conditions = int_conditions, too_far = too_far, resolution = resolution, probs = probs, spaghetti = spaghetti, draw_ids = draw_ids ) if (!length(out)) { stop2("No valid smooth terms found in the model.") } structure(out, class = "brms_conditional_effects", smooths_only = TRUE) } #' @rdname conditional_smooths.brmsfit #' @export conditional_smooths <- function(x, ...) { UseMethod("conditional_smooths") } #' @export conditional_smooths.default <- function(x, ...) { NULL } #' @export conditional_smooths.mvbrmsterms <- function(x, ...) { out <- list() for (r in names(x$terms)) { c(out) <- conditional_smooths(x$terms[[r]], ...) } out } #' @export conditional_smooths.brmsterms <- function(x, ...) { out <- list() for (dp in names(x$dpars)) { c(out) <- conditional_smooths(x$dpars[[dp]], ...) } for (nlp in names(x$nlpars)) { c(out) <- conditional_smooths(x$nlpars[[nlp]], ...) } out } # conditional smooths for a single predicted parameter # @param fit brmsfit object # @param smooths optional names of smooth terms to plot # @param conditions output of prepare_conditions # @param int_conditions values of by-vars at which to evalute smooths # @param ...: currently ignored # @return a named list with one element per smooth term #' @export conditional_smooths.btl <- function(x, fit, smooths, conditions, int_conditions, probs, resolution, too_far, spaghetti, ...) 
{ stopifnot(is.brmsfit(fit)) out <- list() mf <- model.frame(fit) smef <- tidy_smef(x, mf) # fixes issue #1265 smef$term <- rm_wsp(smef$term) smterms <- unique(smef$term) if (!length(smooths)) { I <- seq_along(smterms) } else { I <- which(smterms %in% smooths) } for (i in I) { # loop over smooth terms and compute their predictions smooth <- smterms[i] sub_smef <- subset2(smef, term = smooth) # extract raw variable names before transformations covars <- all_vars(sub_smef$covars[[1]]) byvars <- all_vars(sub_smef$byvars[[1]]) ncovars <- length(covars) if (ncovars > 2L) { byvars <- c(covars[3:ncovars], byvars) covars <- covars[1:2] ncovars <- 2L } vars <- c(covars, byvars) values <- named_list(vars) is_numeric <- setNames(rep(FALSE, ncovars), covars) for (cv in covars) { if (is.numeric(mf[[cv]])) { is_numeric[cv] <- TRUE values[[cv]] <- seq( min(mf[[cv]]), max(mf[[cv]]), length.out = resolution ) } else { values[[cv]] <- levels(factor(mf[[cv]])) } } for (cv in byvars) { if (cv %in% names(int_conditions)) { int_cond <- int_conditions[[cv]] if (is.function(int_cond)) { int_cond <- int_cond(mf[[cv]]) } values[[cv]] <- int_cond } else if (is.numeric(mf[[cv]])) { mean2 <- mean(mf[[cv]], na.rm = TRUE) sd2 <- sd(mf[[cv]], na.rm = TRUE) values[[cv]] <- (-1:1) * sd2 + mean2 } else { values[[cv]] <- levels(factor(mf[[cv]])) } } newdata <- expand.grid(values) if (ncovars == 2L && too_far > 0) { # exclude prediction grid points too far from data ex_too_far <- mgcv::exclude.too.far( g1 = newdata[[covars[1]]], g2 = newdata[[covars[2]]], d1 = mf[, covars[1]], d2 = mf[, covars[2]], dist = too_far ) newdata <- newdata[!ex_too_far, ] } other_vars <- setdiff(names(conditions), vars) newdata <- fill_newdata(newdata, other_vars, conditions) eta <- posterior_smooths(x, fit, smooth, newdata, ...) effects <- na.omit(sub_smef$covars[[1]][1:2]) cond_data <- add_effects__(newdata[, vars, drop = FALSE], effects) if (length(byvars)) { # byvars will be plotted as facets cond_data$cond__ <- rows2labels(cond_data[, byvars, drop = FALSE]) } else { cond_data$cond__ <- factor(1) } spa_data <- NULL if (spaghetti && ncovars == 1L && is_numeric[1]) { sample <- rep(seq_rows(eta), each = ncol(eta)) spa_data <- data.frame(as.numeric(t(eta)), factor(sample)) colnames(spa_data) <- c("estimate__", "sample__") spa_data <- cbind(cond_data, spa_data) } eta <- posterior_summary(eta, robust = TRUE, probs = probs) colnames(eta) <- c("estimate__", "se__", "lower__", "upper__") eta <- cbind(cond_data, eta) response <- combine_prefix(x, keep_mu = TRUE) response <- paste0(response, ": ", smooth) points <- mf[, vars, drop = FALSE] points <- add_effects__(points, covars) attr(eta, "response") <- response attr(eta, "effects") <- effects attr(eta, "surface") <- all(is_numeric) && ncovars == 2L attr(eta, "spaghetti") <- spa_data attr(eta, "points") <- points out[[response]] <- eta } out } # the name 'marginal_smooths' is deprecated as of brms 2.10.3 # do not remove it eventually as it has been used in the brms papers #' @export marginal_smooths <- function(x, ...) { UseMethod("marginal_smooths") } #' @export marginal_smooths.brmsfit <- function(x, ...) { warning2("Method 'marginal_smooths' is deprecated. ", "Please use 'conditional_smooths' instead.") conditional_smooths.brmsfit(x, ...) } brms/R/data-predictor.R0000644000175000017500000010260314135330664014604 0ustar nileshnilesh#' Prepare Predictor Data #' #' Prepare data related to predictor variables in \pkg{brms}. #' Only exported for use in package development. #' #' @param x An \R object. 
#' @param ... Further arguments passed to or from other methods. #' #' @return A named list of data related to predictor variables. #' #' @keywords internal #' @export data_predictor <- function(x, ...) { UseMethod("data_predictor") } #' @export data_predictor.mvbrmsterms <- function(x, data, basis = NULL, ...) { out <- list(N = nrow(data)) for (r in names(x$terms)) { bs <- basis$resps[[r]] c(out) <- data_predictor(x$terms[[r]], data = data, basis = bs, ...) } out } #' @export data_predictor.brmsterms <- function(x, data, data2, prior, ranef, basis = NULL, ...) { out <- list() data <- subset_data(data, x) resp <- usc(combine_prefix(x)) args_eff <- nlist(data, data2, ranef, prior, ...) for (dp in names(x$dpars)) { args_eff_spec <- list(x = x$dpars[[dp]], basis = basis$dpars[[dp]]) c(out) <- do_call(data_predictor, c(args_eff_spec, args_eff)) } for (dp in names(x$fdpars)) { if (is.numeric(x$fdpars[[dp]]$value)) { out[[paste0(dp, resp)]] <- x$fdpars[[dp]]$value } } for (nlp in names(x$nlpars)) { args_eff_spec <- list(x = x$nlpars[[nlp]], basis = basis$nlpars[[nlp]]) c(out) <- do_call(data_predictor, c(args_eff_spec, args_eff)) } c(out) <- data_gr_local(x, data = data, ranef = ranef) c(out) <- data_mixture(x, data2 = data2, prior = prior) out } # prepare data for all types of effects for use in Stan # @param data the data passed by the user # @param ranef object retuend by 'tidy_ranef' # @param prior an object of class brmsprior # @param basis information from original Stan data used to correctly # predict from new data. See 'standata_basis' for details. # @param ... currently ignored # @return a named list of data to be passed to Stan #' @export data_predictor.btl <- function(x, data, ranef = empty_ranef(), prior = brmsprior(), data2 = list(), index = NULL, basis = NULL, ...) { out <- c( data_fe(x, data), data_sp(x, data, data2 = data2, prior = prior, index = index, basis = basis$sp), data_re(x, data, ranef = ranef), data_cs(x, data), data_sm(x, data, basis = basis$sm), data_gp(x, data, basis = basis$gp), data_ac(x, data, data2 = data2, basis = basis$ac), data_offset(x, data), data_bhaz(x, data, data2 = data2, prior = prior, basis = basis$bhaz) ) c(out) <- data_prior(x, data, prior = prior, sdata = out) out } # prepare data for non-linear parameters for use in Stan #' @export data_predictor.btnl <- function(x, data, data2 = list(), basis = NULL, ...) 
{ out <- list() c(out) <- data_cnl(x, data) c(out) <- data_ac(x, data, data2 = data2, basis = basis$ac) out } # prepare data of fixed effects data_fe <- function(bterms, data) { out <- list() p <- usc(combine_prefix(bterms)) # the intercept is removed inside the Stan code for ordinal models cols2remove <- if (is_ordinal(bterms)) "(Intercept)" X <- get_model_matrix(rhs(bterms$fe), data, cols2remove = cols2remove) avoid_dpars(colnames(X), bterms = bterms) out[[paste0("K", p)]] <- ncol(X) out[[paste0("X", p)]] <- X out } # data preparation for splines data_sm <- function(bterms, data, basis = NULL) { out <- list() smterms <- all_terms(bterms[["sm"]]) if (!length(smterms)) { return(out) } p <- usc(combine_prefix(bterms)) new <- length(basis) > 0L if (!new) { knots <- get_knots(data) basis <- named_list(smterms) for (i in seq_along(smterms)) { # the spline penalty has changed in 2.8.7 (#646) diagonal.penalty <- !require_old_default("2.8.7") basis[[i]] <- smoothCon( eval2(smterms[i]), data = data, knots = knots, absorb.cons = TRUE, diagonal.penalty = diagonal.penalty ) } } bylevels <- named_list(smterms) ns <- 0 lXs <- list() for (i in seq_along(basis)) { # may contain multiple terms when 'by' is a factor for (j in seq_along(basis[[i]])) { ns <- ns + 1 sm <- basis[[i]][[j]] if (length(sm$by.level)) { bylevels[[i]][j] <- sm$by.level } if (new) { # prepare rasm for use with new data rasm <- s2rPred(sm, data) } else { rasm <- mgcv::smooth2random(sm, names(data), type = 2) } lXs[[ns]] <- rasm$Xf if (NCOL(lXs[[ns]])) { colnames(lXs[[ns]]) <- paste0(sm$label, "_", seq_cols(lXs[[ns]])) } Zs <- rasm$rand Zs <- setNames(Zs, paste0("Zs", p, "_", ns, "_", seq_along(Zs))) tmp <- list(length(Zs), as.array(ulapply(Zs, ncol))) tmp <- setNames(tmp, paste0(c("nb", "knots"), p, "_", ns)) c(out) <- c(tmp, Zs) } } Xs <- do_call(cbind, lXs) avoid_dpars(colnames(Xs), bterms = bterms) smcols <- lapply(lXs, function(x) which(colnames(Xs) %in% colnames(x))) Xs <- structure(Xs, smcols = smcols, bylevels = bylevels) colnames(Xs) <- rename(colnames(Xs)) out[[paste0("Ks", p)]] <- ncol(Xs) out[[paste0("Xs", p)]] <- Xs out } # prepare data for group-level effects for use in Stan data_re <- function(bterms, data, ranef) { out <- list() px <- check_prefix(bterms) take <- find_rows(ranef, ls = px) & !find_rows(ranef, type = "sp") ranef <- ranef[take, ] if (!nrow(ranef)) { return(out) } gn <- unique(ranef$gn) for (i in seq_along(gn)) { r <- subset2(ranef, gn = gn[i]) Z <- get_model_matrix(r$form[[1]], data = data, rename = FALSE) idp <- paste0(r$id[1], usc(combine_prefix(px))) Znames <- paste0("Z_", idp, "_", r$cn) if (r$gtype[1] == "mm") { ng <- length(r$gcall[[1]]$groups) if (r$type[1] == "cs") { stop2("'cs' is not supported in multi-membership terms.") } if (r$type[1] == "mmc") { # see issue #353 for the general idea mmc_expr <- "^mmc\\([^:]*\\)" mmc_terms <- get_matches_expr(mmc_expr, colnames(Z)) for (t in mmc_terms) { pos <- which(grepl_expr(escape_all(t), colnames(Z))) if (length(pos) != ng) { stop2("Invalid term '", t, "': Expected ", ng, " coefficients but found ", length(pos), ".") } for (j in seq_along(Znames)) { for (k in seq_len(ng)) { out[[paste0(Znames[j], "_", k)]] <- as.array(Z[, pos[k]]) } } } } else { for (j in seq_along(Znames)) { out[paste0(Znames[j], "_", seq_len(ng))] <- list(as.array(Z[, j])) } } } else { if (r$type[1] == "cs") { ncatM1 <- nrow(r) / ncol(Z) Z_temp <- vector("list", ncol(Z)) for (k in seq_along(Z_temp)) { Z_temp[[k]] <- replicate(ncatM1, Z[, k], simplify = FALSE) } Z <- do_call(cbind, 
unlist(Z_temp, recursive = FALSE)) } if (r$type[1] == "mmc") { stop2("'mmc' is only supported in multi-membership terms.") } for (j in seq_cols(Z)) { out[[Znames[j]]] <- as.array(Z[, j]) } } } out } # compute data for each group-level-ID per univariate model data_gr_local <- function(bterms, data, ranef) { stopifnot(is.brmsterms(bterms)) out <- list() ranef <- subset2(ranef, resp = bterms$resp) resp <- usc(bterms$resp) for (id in unique(ranef$id)) { id_ranef <- subset2(ranef, id = id) idresp <- paste0(id, resp) nranef <- nrow(id_ranef) group <- id_ranef$group[1] levels <- attr(ranef, "levels")[[group]] if (id_ranef$gtype[1] == "mm") { # multi-membership grouping term gs <- id_ranef$gcall[[1]]$groups ngs <- length(gs) weights <- id_ranef$gcall[[1]]$weights if (is.formula(weights)) { scale <- isTRUE(attr(weights, "scale")) weights <- as.matrix(eval_rhs(weights, data)) if (!identical(dim(weights), c(nrow(data), ngs))) { stop2( "Grouping structure 'mm' expects 'weights' to be ", "a matrix with as many columns as grouping factors." ) } if (scale) { if (isTRUE(any(weights < 0))) { stop2("Cannot scale negative weights.") } weights <- sweep(weights, 1, rowSums(weights), "/") } } else { # all members get equal weights by default weights <- matrix(1 / ngs, nrow = nrow(data), ncol = ngs) } for (i in seq_along(gs)) { gdata <- get(gs[i], data) J <- match(gdata, levels) if (anyNA(J)) { # occurs for new levels only new_gdata <- gdata[!gdata %in% levels] new_levels <- unique(new_gdata) J[is.na(J)] <- match(new_gdata, new_levels) + length(levels) } out[[paste0("J_", idresp, "_", i)]] <- as.array(J) out[[paste0("W_", idresp, "_", i)]] <- as.array(weights[, i]) } } else { # ordinary grouping term g <- id_ranef$gcall[[1]]$groups gdata <- get(g, data) J <- match(gdata, levels) if (anyNA(J)) { # occurs for new levels only new_gdata <- gdata[!gdata %in% levels] new_levels <- unique(new_gdata) J[is.na(J)] <- match(new_gdata, new_levels) + length(levels) } out[[paste0("J_", idresp)]] <- as.array(J) } } out } # prepare global data for each group-level-ID data_gr_global <- function(ranef, data2) { out <- list() for (id in unique(ranef$id)) { tmp <- list() id_ranef <- subset2(ranef, id = id) nranef <- nrow(id_ranef) group <- id_ranef$group[1] levels <- attr(ranef, "levels")[[group]] tmp$N <- length(levels) tmp$M <- nranef tmp$NC <- as.integer(nranef * (nranef - 1) / 2) # prepare number of levels of an optional 'by' variable if (nzchar(id_ranef$by[1])) { stopifnot(!nzchar(id_ranef$type[1])) bylevels <- id_ranef$bylevels[[1]] Jby <- match(attr(levels, "by"), bylevels) tmp$Nby <- length(bylevels) tmp$Jby <- as.array(Jby) } # prepare within-group covariance matrices cov <- id_ranef$cov[1] if (nzchar(cov)) { # validation is only necessary here for compatibility with 'cov_ranef' cov_mat <- validate_recov_matrix(data2[[cov]]) found_levels <- rownames(cov_mat) found <- levels %in% found_levels if (any(!found)) { stop2("Levels of the within-group covariance matrix for '", group, "' do not match names of the grouping levels.") } cov_mat <- cov_mat[levels, levels, drop = FALSE] tmp$Lcov <- t(chol(cov_mat)) } names(tmp) <- paste0(names(tmp), "_", id) c(out) <- tmp } out } # prepare data for special effects for use in Stan data_sp <- function(bterms, data, data2, prior, index = NULL, basis = NULL) { out <- list() spef <- tidy_spef(bterms, data) if (!nrow(spef)) return(out) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) # prepare general data out[[paste0("Ksp", p)]] <- nrow(spef) Csp <- sp_model_matrix(bterms$sp, data) 
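# 'Csp' is the model matrix of covariates interacting with the special terms; # only columns actually referenced by a term (Ic > 0) are kept below, and each # remaining column is passed to Stan as its own data vector (e.g. 'Csp_1', 'Csp_2', ...)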
avoid_dpars(colnames(Csp), bterms = bterms) Csp <- Csp[, spef$Ic > 0, drop = FALSE] Csp <- lapply(seq_cols(Csp), function(i) as.array(Csp[, i])) if (length(Csp)) { Csp_names <- paste0("Csp", p, "_", seq_along(Csp)) out <- c(out, setNames(Csp, Csp_names)) } if (any(lengths(spef$Imo) > 0)) { # prepare data specific to monotonic effects out[[paste0("Imo", p)]] <- max(unlist(spef$Imo)) Xmo <- lapply(unlist(spef$calls_mo), get_mo_values, data = data) Xmo_names <- paste0("Xmo", p, "_", seq_along(Xmo)) c(out) <- setNames(Xmo, Xmo_names) if (!is.null(basis$Jmo)) { # take information from original data Jmo <- basis$Jmo } else { Jmo <- as.array(ulapply(Xmo, max)) } out[[paste0("Jmo", p)]] <- Jmo # prepare prior concentration of simplex parameters simo_coef <- get_simo_labels(spef, use_id = TRUE) ids <- unlist(spef$ids_mo) for (j in seq_along(simo_coef)) { # index of first ID appearance j_id <- match(ids[j], ids) if (is.na(ids[j]) || j_id == j) { # only evaluate priors without ID or first appearance of the ID # all other parameters will be copied over in the Stan code simo_prior <- subset2(prior, class = "simo", coef = simo_coef[j], ls = px ) con_simo <- eval_dirichlet(simo_prior$prior, Jmo[j], data2) out[[paste0("con_simo", p, "_", j)]] <- as.array(con_simo) } } } uni_mi <- attr(spef, "uni_mi") for (j in seq_rows(uni_mi)) { if (!is.na(uni_mi$idx[j])) { idxl <- get(uni_mi$idx[j], data) if (is.null(index[[uni_mi$var[j]]])) { # the 'idx' argument needs to be mapped against 'index' addition terms stop2("Response '", uni_mi$var[j], "' needs to have an 'index' addition ", "term to compare with 'idx'. See ?mi for examples.") } idxl <- match(idxl, index[[uni_mi$var[j]]]) if (anyNA(idxl)) { stop2("Could not match all indices in response '", uni_mi$var[j], "'.") } idxl_name <- paste0("idxl", p, "_", uni_mi$var[j], "_", uni_mi$idx2[j]) out[[idxl_name]] <- as.array(idxl) } else if (isTRUE(attr(index[[uni_mi$var[j]]], "subset"))) { # cross-formula referencing is required for subsetted variables stop2("mi() terms of subsetted variables require ", "the 'idx' argument to be specified.") } } out } # prepare data for category specific effects data_cs <- function(bterms, data) { out <- list() if (length(all_terms(bterms[["cs"]]))) { p <- usc(combine_prefix(bterms)) Xcs <- get_model_matrix(bterms$cs, data) avoid_dpars(colnames(Xcs), bterms = bterms) out <- c(out, list(Kcs = ncol(Xcs), Xcs = Xcs)) out <- setNames(out, paste0(names(out), p)) } out } # prepare global data for noise free variables data_Xme <- function(meef, data) { stopifnot(is.meef_frame(meef)) out <- list() groups <- unique(meef$grname) for (i in seq_along(groups)) { g <- groups[i] K <- which(meef$grname %in% g) Mme <- length(K) out[[paste0("Mme_", i)]] <- Mme out[[paste0("NCme_", i)]] <- Mme * (Mme - 1) / 2 if (nzchar(g)) { levels <- get_levels(meef)[[g]] gr <- get_me_group(meef$term[K[1]], data) Jme <- match(gr, levels) if (anyNA(Jme)) { # occurs for new levels only # replace NAs with unique values; fixes issue #706 gr[is.na(gr)] <- paste0("new_", seq_len(sum(is.na(gr))), "__") new_gr <- gr[!gr %in% levels] new_levels <- unique(new_gr) Jme[is.na(Jme)] <- length(levels) + match(new_gr, new_levels) } ilevels <- unique(Jme) out[[paste0("Nme_", i)]] <- length(ilevels) out[[paste0("Jme_", i)]] <- Jme } for (k in K) { Xn <- get_me_values(meef$term[k], data) noise <- get_me_noise(meef$term[k], data) if (nzchar(g)) { for (l in ilevels) { # validate values of the same level take <- Jme %in% l if (length(unique(Xn[take])) > 1L || length(unique(noise[take])) > 
1L) { stop2( "Measured values and measurement error should be ", "unique for each group. Occured for level '", levels[l], "' of group '", g, "'." ) } } Xn <- get_one_value_per_group(Xn, Jme) noise <- get_one_value_per_group(noise, Jme) } out[[paste0("Xn_", k)]] <- as.array(Xn) out[[paste0("noise_", k)]] <- as.array(noise) } } out } # prepare data for Gaussian process terms # @param internal store some intermediate data for internal post-processing? # @param ... passed to '.data_gp' data_gp <- function(bterms, data, internal = FALSE, basis = NULL, ...) { out <- list() internal <- as_one_logical(internal) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) gpef <- tidy_gpef(bterms, data) for (i in seq_rows(gpef)) { pi <- paste0(p, "_", i) Xgp <- lapply(gpef$covars[[i]], eval2, data) D <- length(Xgp) out[[paste0("Dgp", pi)]] <- D invalid <- ulapply(Xgp, function(x) !is.numeric(x) || isTRUE(length(dim(x)) > 1L) ) if (any(invalid)) { stop2("Predictors of Gaussian processes should be numeric vectors.") } Xgp <- do_call(cbind, Xgp) cmc <- gpef$cmc[i] scale <- gpef$scale[i] gr <- gpef$gr[i] k <- gpef$k[i] c <- gpef$c[[i]] if (!isNA(k)) { out[[paste0("NBgp", pi)]] <- k ^ D Ks <- as.matrix(do_call(expand.grid, repl(seq_len(k), D))) } byvar <- gpef$byvars[[i]] byfac <- length(gpef$cons[[i]]) > 0L bynum <- !is.null(byvar) && !byfac if (byfac) { # for categorical 'by' variables prepare one GP per level # as.factor will keep unused levels needed for new data byval <- as.factor(get(byvar, data)) byform <- str2formula(c(ifelse(cmc, "0", "1"), "byval")) con_mat <- model.matrix(byform) cons <- colnames(con_mat) out[[paste0("Kgp", pi)]] <- length(cons) Ngp <- Nsubgp <- vector("list", length(cons)) for (j in seq_along(cons)) { # loop along contrasts of 'by' Cgp <- con_mat[, j] sfx <- paste0(pi, "_", j) tmp <- .data_gp( Xgp, k = k, gr = gr, sfx = sfx, Cgp = Cgp, c = c, scale = scale, internal = internal, basis = basis, ... ) Ngp[[j]] <- attributes(tmp)[["Ngp"]] Nsubgp[[j]] <- attributes(tmp)[["Nsubgp"]] c(out) <- tmp } out[[paste0("Ngp", pi)]] <- unlist(Ngp) if (gr) { out[[paste0("Nsubgp", pi)]] <- unlist(Nsubgp) } } else { out[[paste0("Kgp", pi)]] <- 1L c(out) <- .data_gp( Xgp, k = k, gr = gr, sfx = pi, c = c, scale = scale, internal = internal, basis = basis, ... 
) if (bynum) { Cgp <- as.numeric(get(byvar, data)) out[[paste0("Cgp", pi)]] <- as.array(Cgp) } } } if (length(basis)) { # original covariate values are required in new GP prediction Xgp_old <- basis[grepl("^Xgp", names(basis))] names(Xgp_old) <- paste0(names(Xgp_old), "_old") out[names(Xgp_old)] <- Xgp_old } out } # helper function to preparae GP related data # @inheritParams data_gp # @param Xgp matrix of covariate values # @param k, gr, c see 'tidy_gpef' # @param sfx suffix to put at the end of data names # @param Cgp optional vector of values belonging to # a certain contrast of a factor 'by' variable .data_gp <- function(Xgp, k, gr, sfx, Cgp = NULL, c = NULL, scale = TRUE, internal = FALSE, basis = NULL) { out <- list() if (!is.null(Cgp)) { Cgp <- unname(Cgp) Igp <- which(Cgp != 0) Xgp <- Xgp[Igp, , drop = FALSE] out[[paste0("Igp", sfx)]] <- as.array(Igp) out[[paste0("Cgp", sfx)]] <- as.array(Cgp[Igp]) attr(out, "Ngp") <- length(Igp) } if (gr) { groups <- factor(match_rows(Xgp, Xgp)) ilevels <- levels(groups) Jgp <- match(groups, ilevels) Nsubgp <- length(ilevels) if (!is.null(Cgp)) { attr(out, "Nsubgp") <- Nsubgp } else { out[[paste0("Nsubgp", sfx)]] <- Nsubgp } out[[paste0("Jgp", sfx)]] <- as.array(Jgp) not_dupl_Jgp <- !duplicated(Jgp) Xgp <- Xgp[not_dupl_Jgp, , drop = FALSE] } if (scale) { # scale predictor for easier specification of priors if (length(basis)) { # scale Xgp based on the original data dmax <- basis[[paste0("dmax", sfx)]] } else { dmax <- sqrt(max(diff_quad(Xgp))) } if (!isTRUE(dmax > 0)) { stop2("Could not scale GP covariates. Please set 'scale' to FALSE in 'gp'.") } if (internal) { # required for scaling of GPs with new data out[[paste0("dmax", sfx)]] <- dmax } Xgp <- Xgp / dmax } if (length(basis)) { # center Xgp based on the original data cmeans <- basis[[paste0("cmeans", sfx)]] } else { cmeans <- colMeans(Xgp) } if (internal) { # required for centering of approximate GPs with new data out[[paste0("cmeans", sfx)]] <- cmeans # required to compute inverse-gamma priors for length-scales out[[paste0("Xgp_prior", sfx)]] <- Xgp } if (!isNA(k)) { # basis function approach requires centered variables Xgp <- sweep(Xgp, 2, cmeans) D <- NCOL(Xgp) L <- choose_L(Xgp, c = c) Ks <- as.matrix(do_call(expand.grid, repl(seq_len(k), D))) XgpL <- matrix(nrow = NROW(Xgp), ncol = NROW(Ks)) slambda <- matrix(nrow = NROW(Ks), ncol = D) for (m in seq_rows(Ks)) { XgpL[, m] <- eigen_fun_cov_exp_quad(Xgp, m = Ks[m, ], L = L) slambda[m, ] <- sqrt(eigen_val_cov_exp_quad(m = Ks[m, ], L = L)) } out[[paste0("Xgp", sfx)]] <- XgpL out[[paste0("slambda", sfx)]] <- slambda } else { out[[paste0("Xgp", sfx)]] <- as.array(Xgp) } out } # data for autocorrelation variables # @param locations optional original locations for CAR models data_ac <- function(bterms, data, data2, basis = NULL, ...) 
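# Returns a named list of Stan data whose elements depend on the term class:
# e.g. 'Kar', 'Kma', and 'J_lag' for ARMA terms, 'begin_tg'/'nobs_tg'/'end_tg'
# for covariance-based time-series structures, 'Msar'/'eigenMsar' for SAR,
# 'Nloc'/'Jloc' and edge indices for CAR, and 'Mfcor' for FCOR terms.
# Illustration (hypothetical formula): for y ~ x + ar(time = t, gr = g) the
# output is expected to contain 'Kar', 'Kma', and 'J_lag', with the usual
# response suffix appended in multivariate models.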
{ out <- list() N <- nrow(data) acef <- tidy_acef(bterms) if (has_ac_subset(bterms, dim = "time")) { gr <- subset2(acef, dim = "time")$gr if (gr != "NA") { tgroup <- as.numeric(factor(data[[gr]])) } else { tgroup <- rep(1, N) } } if (has_ac_class(acef, "arma")) { # ARMA correlations acef_arma <- subset2(acef, class = "arma") out$Kar <- acef_arma$p out$Kma <- acef_arma$q if (!use_ac_cov_time(acef_arma)) { # data for the 'predictor' version of ARMA max_lag <- max(out$Kar, out$Kma) out$J_lag <- as.array(rep(0, N)) for (n in seq_len(N)[-N]) { ind <- n:max(1, n + 1 - max_lag) # indexes errors to be used in the n+1th prediction out$J_lag[n] <- sum(tgroup[ind] %in% tgroup[n + 1]) } } } if (use_ac_cov_time(acef)) { # data for the 'covariance' versions of time-series structures out$N_tg <- length(unique(tgroup)) out$begin_tg <- as.array(ulapply(unique(tgroup), match, tgroup)) out$nobs_tg <- as.array(with(out, c(if (N_tg > 1L) begin_tg[2:N_tg], N + 1) - begin_tg )) out$end_tg <- with(out, begin_tg + nobs_tg - 1) } if (has_ac_class(acef, "sar")) { acef_sar <- subset2(acef, class = "sar") M <- data2[[acef_sar$M]] rmd_rows <- attr(data, "na.action") if (!is.null(rmd_rows)) { class(rmd_rows) <- NULL M <- M[-rmd_rows, -rmd_rows, drop = FALSE] } if (!is_equal(dim(M), rep(N, 2))) { stop2("Dimensions of 'M' for SAR terms must be equal to ", "the number of observations.") } out$Msar <- as.matrix(M) out$eigenMsar <- eigen(M)$values # simplifies code of choose_N out$N_tg <- 1 } if (has_ac_class(acef, "car")) { acef_car <- subset2(acef, class = "car") locations <- NULL if (length(basis)) { locations <- basis$locations } M <- data2[[acef_car$M]] if (acef_car$gr != "NA") { loc_data <- get(acef_car$gr, data) new_locations <- levels(factor(loc_data)) if (is.null(locations)) { locations <- new_locations } else { invalid_locations <- setdiff(new_locations, locations) if (length(invalid_locations)) { stop2("Cannot handle new locations in CAR models.") } } Nloc <- length(locations) Jloc <- as.array(match(loc_data, locations)) if (is.null(rownames(M))) { stop2("Row names are required for 'M' in CAR terms.") } found <- locations %in% rownames(M) if (any(!found)) { stop2("Row names of 'M' for CAR terms do not match ", "the names of the grouping levels.") } M <- M[locations, locations, drop = FALSE] } else { warning2( "Using CAR terms without a grouping factor is deprecated. ", "Please use argument 'gr' even if each observation ", "represents its own location." ) Nloc <- N Jloc <- as.array(seq_len(Nloc)) if (!is_equal(dim(M), rep(Nloc, 2))) { if (length(basis)) { stop2("Cannot handle new data in CAR terms ", "without a grouping factor.") } else { stop2("Dimensions of 'M' for CAR terms must be equal ", "to the number of observations.") } } } edges_rows <- (Matrix::tril(M)@i + 1) edges_cols <- sort(Matrix::triu(M)@i + 1) ## sort to make consistent with rows edges <- cbind("rows" = edges_rows, "cols" = edges_cols) c(out) <- nlist( Nloc, Jloc, Nedges = length(edges_rows), edges1 = as.array(edges_rows), edges2 = as.array(edges_cols) ) if (acef_car$type %in% c("escar", "esicar")) { Nneigh <- Matrix::colSums(M) if (any(Nneigh == 0) && !length(basis)) { stop2( "For exact sparse CAR, all locations should have at ", "least one neighbor within the provided data set. ", "Consider using type = 'icar' instead." 
) } inv_sqrt_D <- diag(1 / sqrt(Nneigh)) eigenMcar <- t(inv_sqrt_D) %*% M %*% inv_sqrt_D eigenMcar <- eigen(eigenMcar, TRUE, only.values = TRUE)$values c(out) <- nlist(Nneigh, eigenMcar) } else if (acef_car$type %in% "bym2") { c(out) <- list(car_scale = .car_scale(edges, Nloc)) } } if (has_ac_class(acef, "fcor")) { acef_fcor <- subset2(acef, class = "fcor") M <- data2[[acef_fcor$M]] rmd_rows <- attr(data, "na.action") if (!is.null(rmd_rows)) { class(rmd_rows) <- NULL M <- M[-rmd_rows, -rmd_rows, drop = FALSE] } if (nrow(M) != N) { stop2("Dimensions of 'M' for FCOR terms must be equal ", "to the number of observations.") } out$Mfcor <- M # simplifies code of choose_N out$N_tg <- 1 } if (length(out)) { resp <- usc(combine_prefix(bterms)) out <- setNames(out, paste0(names(out), resp)) } out } # prepare data of offsets for use in Stan data_offset <- function(bterms, data) { out <- list() px <- check_prefix(bterms) if (is.formula(bterms$offset)) { p <- usc(combine_prefix(px)) mf <- rm_attr(data, "terms") mf <- model.frame(bterms$offset, mf, na.action = na.pass) offset <- model.offset(mf) if (length(offset) == 1L) { offset <- rep(offset, nrow(data)) } # use 'offsets' as 'offset' will be reserved in stanc3 out[[paste0("offsets", p)]] <- as.array(offset) } out } # data for covariates in non-linear models # @param x a btnl object # @return a named list of data passed to Stan data_cnl <- function(bterms, data) { stopifnot(is.btnl(bterms)) out <- list() covars <- all.vars(bterms$covars) if (!length(covars)) { return(out) } p <- usc(combine_prefix(bterms)) for (i in seq_along(covars)) { cvalues <- get(covars[i], data) if (is_like_factor(cvalues)) { # need to apply factor contrasts cform <- str2formula(covars[i]) cvalues <- get_model_matrix(cform, data, cols2remove = "(Intercept)") if (NCOL(cvalues) > 1) { stop2("Factors with more than two levels are not allowed as covariates.") } cvalues <- cvalues[, 1] } out[[paste0("C", p, "_", i)]] <- as.array(cvalues) } out } # compute the spatial scaling factor of CAR models # @param edges matrix with two columns defining the adjacency of the locations # @param Nloc number of locations # @return a scalar scaling factor .car_scale <- function(edges, Nloc) { # amended from Imad Ali's code of CAR models in rstanarm stopifnot(is.matrix(edges), NCOL(edges) == 2) # Build the adjacency matrix adj_matrix <- Matrix::sparseMatrix( i = edges[, 1], j = edges[, 2], x = 1, symmetric = TRUE ) # The ICAR precision matrix (which is singular) Q <- Matrix::Diagonal(Nloc, Matrix::rowSums(adj_matrix)) - adj_matrix # Add a small jitter to the diagonal for numerical stability Q_pert <- Q + Matrix::Diagonal(Nloc) * max(Matrix::diag(Q)) * sqrt(.Machine$double.eps) # Compute the diagonal elements of the covariance matrix subject to the # constraint that the entries of the ICAR sum to zero. 
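  # that is, the covariance under the sum-to-zero constraint A * theta = 0 with
  # A = (1, ..., 1):  Sigma_c = Sigma - Sigma A' (A Sigma A')^{-1} A Sigma,
  # which .Q_inv() below evaluates with W = Sigma %*% t(A)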
.Q_inv <- function(Q) { Sigma <- Matrix::solve(Q) A <- matrix(1, 1, NROW(Sigma)) W <- Sigma %*% t(A) Sigma <- Sigma - W %*% solve(A %*% W) %*% Matrix::t(W) return(Sigma) } Q_inv <- .Q_inv(Q_pert) # Compute the geometric mean of the variances (diagonal of Q_inv) exp(mean(log(Matrix::diag(Q_inv)))) } # data for special priors such as horseshoe and lasso data_prior <- function(bterms, data, prior, sdata = NULL) { out <- list() px <- check_prefix(bterms) p <- usc(combine_prefix(px)) special <- get_special_prior(prior, px) if (!is.null(special$horseshoe)) { # data for the horseshoe prior hs_names <- c("df", "df_global", "df_slab", "scale_global", "scale_slab") hs_data <- special$horseshoe[hs_names] if (!is.null(special$horseshoe$par_ratio)) { hs_data$scale_global <- special$horseshoe$par_ratio / sqrt(nrow(data)) } names(hs_data) <- paste0("hs_", hs_names, p) out <- c(out, hs_data) } if (!is.null(special$R2D2)) { # data for the R2D2 prior R2D2_names <- c("mean_R2", "prec_R2", "cons_D2") R2D2_data <- special$R2D2[R2D2_names] # number of coefficients minus the intercept K <- sdata[[paste0("K", p)]] - ifelse(stan_center_X(bterms), 1, 0) if (length(R2D2_data$cons_D2) == 1L) { R2D2_data$cons_D2 <- rep(R2D2_data$cons_D2, K) } if (length(R2D2_data$cons_D2) != K) { stop2("Argument 'cons_D2' of the R2D2 prior must be of length 1 or ", K) } R2D2_data$cons_D2 <- as.array(R2D2_data$cons_D2) names(R2D2_data) <- paste0("R2D2_", R2D2_names, p) out <- c(out, R2D2_data) } if (!is.null(special$lasso)) { lasso_names <- c("df", "scale") lasso_data <- special$lasso[lasso_names] names(lasso_data) <- paste0("lasso_", lasso_names, p) out <- c(out, lasso_data) } out } # Construct design matrices for brms models # @param formula a formula object # @param data A data frame created with model.frame. # If another sort of object, model.frame is called first. # @param cols2remove names of the columns to remove from # the model matrix; mainly used for intercepts # @param rename rename column names via rename()? # @param ... passed to stats::model.matrix # @return # The design matrix for the given formula and data. # For details see ?stats::model.matrix get_model_matrix <- function(formula, data = environment(formula), cols2remove = NULL, rename = TRUE, ...) { stopifnot(is.atomic(cols2remove)) terms <- validate_terms(formula) if (is.null(terms)) { return(NULL) } if (no_int(terms)) { cols2remove <- union(cols2remove, "(Intercept)") } X <- stats::model.matrix(terms, data, ...) cols2remove <- which(colnames(X) %in% cols2remove) if (length(cols2remove)) { X <- X[, -cols2remove, drop = FALSE] } if (rename) { colnames(X) <- rename(colnames(X), check_dup = TRUE) } X } # convenient wrapper around mgcv::PredictMat PredictMat <- function(object, data, ...) { data <- rm_attr(data, "terms") out <- mgcv::PredictMat(object, data = data, ...) if (length(dim(out)) < 2L) { # fixes issue #494 out <- matrix(out, nrow = 1) } out } # convenient wrapper around mgcv::smoothCon smoothCon <- function(object, data, ...) { data <- rm_attr(data, "terms") vars <- setdiff(c(object$term, object$by), "NA") for (v in vars) { if (is_like_factor(data[[v]])) { # allow factor-like variables #562 data[[v]] <- as.factor(data[[v]]) } else if (inherits(data[[v]], "difftime")) { # mgcv cannot handle 'difftime' variables data[[v]] <- as.numeric(data[[v]]) } } mgcv::smoothCon(object, data = data, ...) 
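  # the return value is the list of smooth objects produced by
  # mgcv::smoothCon(), with factor-like and 'difftime' inputs coerced beforehand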
} # Aid prediction from smooths represented as 'type = 2' # originally provided by Simon Wood # @param sm output of mgcv::smoothCon # @param data new data supplied for prediction # @return A list of the same structure as returned by mgcv::smoothCon s2rPred <- function(sm, data) { re <- mgcv::smooth2random(sm, names(data), type = 2) # prediction matrix for new data X <- PredictMat(sm, data) # transform to RE parameterization if (!is.null(re$trans.U)) { X <- X %*% re$trans.U } X <- t(t(X) * re$trans.D) # re-order columns according to random effect re-ordering X[, re$rind] <- X[, re$pen.ind != 0] # re-order penalization index in same way pen.ind <- re$pen.ind pen.ind[re$rind] <- pen.ind[pen.ind > 0] # start returning the object Xf <- X[, which(re$pen.ind == 0), drop = FALSE] out <- list(rand = list(), Xf = Xf) for (i in seq_along(re$rand)) { # loop over random effect matrices out$rand[[i]] <- X[, which(pen.ind == i), drop = FALSE] attr(out$rand[[i]], "s.label") <- attr(re$rand[[i]], "s.label") } names(out$rand) <- names(re$rand) out } brms/R/posterior_samples.R0000644000175000017500000002064614111751666015465 0ustar nileshnilesh#' (Deprecated) Extract Posterior Samples #' #' Extract posterior samples of specified parameters. The #' \code{posterior_samples} method is deprecated. We recommend using the more #' modern and consistent \code{\link[brms:draws-brms]{as_draws_*}} extractor #' functions of the \pkg{posterior} package instead. #' #' @param x An \code{R} object typically of class \code{brmsfit} #' @param pars Names of parameters for which posterior samples #' should be returned, as given by a character vector or regular expressions. #' By default, all posterior samples of all parameters are extracted. #' @param fixed Indicates whether parameter names #' should be matched exactly (\code{TRUE}) or treated as #' regular expressions (\code{FALSE}). Default is \code{FALSE}. #' @param add_chain A flag indicating if the returned \code{data.frame} #' should contain two additional columns. The \code{chain} column #' indicates the chain in which each sample was generated, the \code{iter} #' column indicates the iteration number within each chain. #' @param subset A numeric vector indicating the rows #' (i.e., posterior samples) to be returned. #' If \code{NULL} (the default), all posterior samples are returned. #' @param as.matrix Should the output be a \code{matrix} #' instead of a \code{data.frame}? Defaults to \code{FALSE}. #' @param as.array Should the output be an \code{array} #' instead of a \code{data.frame}? Defaults to \code{FALSE}. #' @param ... Arguments passed to individual methods (if applicable). #' #' @return A data.frame (matrix or array) containing the posterior samples. #' #' @seealso \code{\link[brms:draws-brms]{as_draws}}, #' \code{\link[brms:as.data.frame.brmsfit]{as.data.frame}} #' #' @examples #' \dontrun{ #' fit <- brm(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' #' # extract posterior samples of population-level effects #' samples1 <- posterior_samples(fit, pars = "^b") #' head(samples1) #' #' # extract posterior samples of group-level standard deviations #' samples2 <- posterior_samples(fit, pars = "^sd_") #' head(samples2) #' } #' #' @export posterior_samples.brmsfit <- function(x, pars = NA, fixed = FALSE, add_chain = FALSE, subset = NULL, as.matrix = FALSE, as.array = FALSE, ...) 
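# For new code, a rough equivalent of posterior_samples(fit, pars = "^b_") is
# (assuming a fitted 'brmsfit' object and the 'posterior' package):
#   as.data.frame(as_draws_df(fit, variable = "^b_", regex = TRUE))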
{ if (as.matrix && as.array) { stop2("Cannot use 'as.matrix' and 'as.array' at the same time.") } if (add_chain && as.array) { stop2("Cannot use 'add_chain' and 'as.array' at the same time.") } contains_draws(x) pars <- extract_pars(pars, variables(x), fixed = fixed, ...) # get basic information on the samples iter <- x$fit@sim$iter warmup <- x$fit@sim$warmup thin <- x$fit@sim$thin chains <- x$fit@sim$chains final_iter <- ceiling((iter - warmup) / thin) samples_taken <- seq(warmup + 1, iter, thin) samples <- NULL if (length(pars)) { if (as.matrix) { samples <- as.matrix(x$fit, pars = pars) } else if (as.array) { samples <- as.array(x$fit, pars = pars) } else { samples <- as.data.frame(x$fit, pars = pars) } if (add_chain) { # name the column 'chain' not 'chains' (#32) samples <- cbind(samples, chain = factor(rep(1:chains, each = final_iter)), iter = rep(samples_taken, chains) ) } if (!is.null(subset)) { if (as.array) { samples <- samples[subset, , , drop = FALSE] } else { samples <- samples[subset, , drop = FALSE] } } } samples } #' @rdname posterior_samples.brmsfit #' @export posterior_samples <- function(x, pars = NA, ...) { warning2("Method 'posterior_samples' is deprecated. ", "Please see ?as_draws for recommended alternatives.") UseMethod("posterior_samples") } #' @export posterior_samples.default <- function(x, pars = NA, fixed = FALSE, ...) { x <- as.data.frame(x) if (!anyNA(pars)) { pars <- extract_pars(pars, all_pars = names(x), fixed = fixed, ...) x <- x[, pars, drop = FALSE] } if (!ncol(x)) { x <- NULL } x } #' Extract Parameter Names #' #' Extract all parameter names of a given model. #' #' @aliases parnames.brmsfit #' #' @param x An \R object #' @param ... Further arguments passed to or from other methods. #' #' @return A character vector containing the parameter names of the model. #' #' @export parnames <- function(x, ...) { warning2("'parnames' is deprecated. Please use 'variables' instead.") UseMethod("parnames") } #' @export parnames.default <- function(x, ...) { names(x) } #' @export parnames.brmsfit <- function(x, ...) { out <- dimnames(x$fit) if (is.list(out)) { out <- out$parameters } out } # extract all valid parameter names that match pars # @param pars A character vector or regular expression # @param all_pars all parameter names of the fitted model # @param fixed should parameter names be matched exactly? # @param exact_match deprecated alias of fixed # @param na_value: what should be returned if pars is NA? # @param ... Further arguments to be passed to grepl # @return A character vector of parameter names extract_pars <- function(pars, all_pars, fixed = FALSE, exact_match = FALSE, na_value = all_pars, ...) { if (!(anyNA(pars) || is.character(pars))) { stop2("Argument 'pars' must be NA or a character vector.") } fixed <- check_deprecated_fixed(fixed, exact_match) if (!anyNA(pars)) { fixed <- as_one_logical(fixed) if (fixed) { out <- intersect(pars, all_pars) } else { out <- vector("list", length(pars)) for (i in seq_along(pars)) { out[[i]] <- all_pars[grepl(pars[i], all_pars, ...)] } out <- unique(unlist(out)) } } else { out <- na_value } out } # check deprecated alias of argument 'fixed' check_deprecated_fixed <- function(fixed, exact_match) { if (!isFALSE(exact_match)) { # deprecated as of brms 2.10.6; remove in brms 3.0 warning2("Argument 'exact_match' is deprecated. 
", "Please use 'fixed' instead.") fixed <- exact_match } fixed } #' Extract posterior samples for use with the \pkg{coda} package #' #' @aliases as.mcmc #' #' @inheritParams posterior_samples.brmsfit #' @param ... currently unused #' @param combine_chains Indicates whether chains should be combined. #' @param inc_warmup Indicates if the warmup samples should be included. #' Default is \code{FALSE}. Warmup samples are used to tune the #' parameters of the sampling algorithm and should not be analyzed. #' #' @return If \code{combine_chains = TRUE} an \code{mcmc} object is returned. #' If \code{combine_chains = FALSE} an \code{mcmc.list} object is returned. #' #' @method as.mcmc brmsfit #' @export #' @export as.mcmc #' @importFrom coda as.mcmc as.mcmc.brmsfit <- function(x, pars = NA, fixed = FALSE, combine_chains = FALSE, inc_warmup = FALSE, ...) { warning2("as.mcmc.brmsfit is deprecated and will eventually be removed.") contains_draws(x) pars <- extract_pars(pars, all_pars = variables(x), fixed = fixed, ...) combine_chains <- as_one_logical(combine_chains) inc_warmup <- as_one_logical(inc_warmup) if (combine_chains) { if (inc_warmup) { stop2("Cannot include warmup samples when 'combine_chains' is TRUE.") } out <- as.matrix(x$fit, pars) ndraws <- nrow(out) end <- x$fit@sim$iter * x$fit@sim$chains thin <- x$fit@sim$thin start <- end - (ndraws - 1) * thin mcpar <- c(start, end, thin) attr(out, "mcpar") <- mcpar class(out) <- "mcmc" } else { thin <- x$fit@sim$thin if (inc_warmup && thin >= 2) { stop2("Cannot include warmup samples when 'thin' >= 2.") } ps <- rstan::extract(x$fit, pars, permuted = FALSE, inc_warmup = inc_warmup) ndraws <- dim(ps)[1] end <- x$fit@sim$iter start <- end - (ndraws - 1) * thin mcpar <- c(start, end, thin) out <- vector("list", length = dim(ps)[2]) for (i in seq_along(out)) { out[[i]] <- ps[, i, ] attr(out[[i]], "mcpar") <- mcpar class(out[[i]]) <- "mcmc" } class(out) <- "mcmc.list" } out } brms/R/pp_check.R0000644000175000017500000001503014111751666013456 0ustar nileshnilesh#' Posterior Predictive Checks for \code{brmsfit} Objects #' #' Perform posterior predictive checks with the help #' of the \pkg{bayesplot} package. #' #' @aliases pp_check #' #' @param object An object of class \code{brmsfit}. #' @param type Type of the ppc plot as given by a character string. #' See \code{\link[bayesplot:PPC-overview]{PPC}} for an overview #' of currently supported types. You may also use an invalid #' type (e.g. \code{type = "xyz"}) to get a list of supported #' types in the resulting error message. #' @param ndraws Positive integer indicating how many #' posterior draws should be used. #' If \code{NULL} all draws are used. If not specified, #' the number of posterior draws is chosen automatically. #' Ignored if \code{draw_ids} is not \code{NULL}. #' @param group Optional name of a factor variable in the model #' by which to stratify the ppc plot. This argument is required for #' ppc \code{*_grouped} types and ignored otherwise. #' @param x Optional name of a variable in the model. #' Only used for ppc types having an \code{x} argument #' and ignored otherwise. #' @param ... Further arguments passed to \code{\link{predict.brmsfit}} #' as well as to the PPC function specified in \code{type}. #' @inheritParams prepare_predictions.brmsfit #' #' @return A ggplot object that can be further #' customized using the \pkg{ggplot2} package. 
#' #' @details For a detailed explanation of each of the ppc functions, #' see the \code{\link[bayesplot:PPC-overview]{PPC}} #' documentation of the \pkg{\link[bayesplot:bayesplot-package]{bayesplot}} #' package. #' #' @examples #' \dontrun{ #' fit <- brm(count ~ zAge + zBase * Trt #' + (1|patient) + (1|obs), #' data = epilepsy, family = poisson()) #' #' pp_check(fit) # shows dens_overlay plot by default #' pp_check(fit, type = "error_hist", ndraws = 11) #' pp_check(fit, type = "scatter_avg", ndraws = 100) #' pp_check(fit, type = "stat_2d") #' pp_check(fit, type = "rootogram") #' pp_check(fit, type = "loo_pit") #' #' ## get an overview of all valid types #' pp_check(fit, type = "xyz") #' } #' #' @importFrom bayesplot pp_check #' @export pp_check #' @export pp_check.brmsfit <- function(object, type, ndraws = NULL, nsamples = NULL, group = NULL, x = NULL, newdata = NULL, resp = NULL, draw_ids = NULL, subset = NULL, ...) { dots <- list(...) if (missing(type)) { type <- "dens_overlay" } type <- as_one_character(type) if (!is.null(group)) { group <- as_one_character(group) } if (!is.null(x)) { x <- as_one_character(x) } ndraws_given <- any(c("ndraws", "nsamples") %in% names(match.call())) ndraws <- use_alias(ndraws, nsamples) draw_ids <- use_alias(draw_ids, subset) resp <- validate_resp(resp, object, multiple = FALSE) valid_types <- as.character(bayesplot::available_ppc("")) valid_types <- sub("^ppc_", "", valid_types) if (!type %in% valid_types) { stop2("Type '", type, "' is not a valid ppc type. ", "Valid types are:\n", collapse_comma(valid_types)) } ppc_fun <- get(paste0("ppc_", type), asNamespace("bayesplot")) object <- restructure(object) stopifnot_resp(object, resp) family <- family(object, resp = resp) if (has_multicol(family)) { stop2("'pp_check' is not implemented for this family.") } valid_vars <- names(model.frame(object)) if ("group" %in% names(formals(ppc_fun))) { if (is.null(group)) { stop2("Argument 'group' is required for ppc type '", type, "'.") } if (!group %in% valid_vars) { stop2("Variable '", group, "' could not be found in the data.") } } if ("x" %in% names(formals(ppc_fun))) { if (!is.null(x) && !x %in% valid_vars) { stop2("Variable '", x, "' could not be found in the data.") } } if (type == "error_binned") { if (is_polytomous(family)) { stop2("Type '", type, "' is not available for polytomous models.") } method <- "posterior_epred" } else { method <- "posterior_predict" } if (!ndraws_given) { aps_types <- c( "error_scatter_avg", "error_scatter_avg_vs_x", "intervals", "intervals_grouped", "loo_pit", "loo_intervals", "loo_ribbon", "ribbon", "ribbon_grouped", "rootogram", "scatter_avg", "scatter_avg_grouped", "stat", "stat_2d", "stat_freqpoly_grouped", "stat_grouped", "violin_grouped" ) if (!is.null(draw_ids)) { ndraws <- NULL } else if (type %in% aps_types) { ndraws <- NULL message("Using all posterior draws for ppc type '", type, "' by default.") } else { ndraws <- 10 message("Using 10 posterior draws for ppc type '", type, "' by default.") } } y <- get_y(object, resp = resp, newdata = newdata, ...) draw_ids <- validate_draw_ids(object, draw_ids, ndraws) pred_args <- list( object, newdata = newdata, resp = resp, draw_ids = draw_ids, ... ) yrep <- do_call(method, pred_args) if (anyNA(y)) { warning2("NA responses are not shown in 'pp_check'.") take <- !is.na(y) y <- y[take] yrep <- yrep[, take, drop = FALSE] } data <- current_data( object, newdata = newdata, resp = resp, re_formula = NA, check_response = TRUE, ... 
) # censored responses are misleading when displayed in pp_check bterms <- brmsterms(object$formula) cens <- get_cens(bterms, data, resp = resp) if (!is.null(cens)) { warning2("Censored responses are not shown in 'pp_check'.") take <- !cens if (!any(take)) { stop2("No non-censored responses found.") } y <- y[take] yrep <- yrep[, take, drop = FALSE] } # most ... arguments are ment for the prediction function for_pred <- names(dots) %in% names(formals(prepare_predictions.brmsfit)) ppc_args <- c(list(y, yrep), dots[!for_pred]) if ("psis_object" %in% setdiff(names(formals(ppc_fun)), names(ppc_args))) { ppc_args$psis_object <- do_call( compute_loo, c(pred_args, criterion = "psis") ) } if ("lw" %in% setdiff(names(formals(ppc_fun)), names(ppc_args))) { ppc_args$lw <- weights( do_call(compute_loo, c(pred_args, criterion = "psis")) ) } if (!is.null(group)) { ppc_args$group <- data[[group]] } if (!is.null(x)) { ppc_args$x <- data[[x]] if (!is_like_factor(ppc_args$x)) { ppc_args$x <- as.numeric(ppc_args$x) } } do_call(ppc_fun, ppc_args) } brms/R/brmsterms.R0000644000175000017500000010625614111751665013732 0ustar nileshnilesh#' Parse Formulas of \pkg{brms} Models #' #' Parse formulas objects for use in \pkg{brms}. #' #' @aliases parse_bf #' #' @inheritParams brm #' @param check_response Logical; Indicates whether the left-hand side #' of \code{formula} (i.e. response variables and addition arguments) #' should be parsed. If \code{FALSE}, \code{formula} may also be one-sided. #' @param resp_rhs_all Logical; Indicates whether to also include response #' variables on the right-hand side of formula \code{.$allvars}, #' where \code{.} represents the output of \code{brmsterms}. #' @param ... Further arguments passed to or from other methods. #' #' @return An object of class \code{brmsterms} or \code{mvbrmsterms} #' (for multivariate models), which is a \code{list} containing all #' required information initially stored in \code{formula} #' in an easier to use format, basically a list of formulas #' (not an abstract syntax tree). #' #' @details This is the main formula parsing function of \pkg{brms}. #' It should usually not be called directly, but is exported to allow #' package developers making use of the formula syntax implemented #' in \pkg{brms}. As long as no other packages depend on this functions, #' it may be changed without deprecation warnings, when new features make #' this necessary. #' #' @seealso #' \code{\link{brm}}, #' \code{\link{brmsformula}}, #' \code{\link{mvbrmsformula}} #' #' @export brmsterms <- function(formula, ...) { UseMethod("brmsterms") } # the name 'parse_bf' is deprecated as of brms 2.12.4 # remove it eventually in brms 3.0 #' @export parse_bf <- function(x, ...) { warning2("Method 'parse_bf' is deprecated. Please use 'brmsterms' instead.") UseMethod("brmsterms") } #' @rdname brmsterms #' @export brmsterms.default <- function(formula, ...) { brmsterms(validate_formula(formula), ...) } #' @rdname brmsterms #' @export brmsterms.brmsformula <- function(formula, check_response = TRUE, resp_rhs_all = TRUE, ...) 
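# Illustration (hypothetical formula):
#   bt <- brmsterms(bf(y ~ x + (1 | g), sigma ~ x))
# is expected to yield a 'brmsterms' object whose $dpars element holds one
# 'btl' term object each for 'mu' and 'sigma'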
{ x <- validate_formula(formula) mv <- isTRUE(x$mv) rescor <- mv && isTRUE(x$rescor) mecor <- isTRUE(x$mecor) formula <- x$formula family <- x$family y <- nlist(formula, family, mv, rescor, mecor) y$cov_ranef <- x$cov_ranef class(y) <- "brmsterms" if (check_response) { # extract response variables y$respform <- validate_resp_formula(formula, empty_ok = FALSE) if (mv) { y$resp <- terms_resp(y$respform) } else { y$resp <- "" } } # extract addition arguments adforms <- terms_ad(formula, family, check_response) advars <- str2formula(ulapply(adforms, all_vars)) y$adforms[names(adforms)] <- adforms # centering would lead to incorrect results for grouped threshold vectors # as each threshold vector only affects a subset of observations if (!is.null(get_ad_expr(y, "thres", "gr"))) { attr(formula, "center") <- FALSE dp_classes <- dpar_class(names(x$pforms)) mu_names <- names(x$pforms)[dp_classes == "mu"] for (dp in mu_names) { attr(x$pforms[[dp]], "center") <- FALSE } } # combine the main formula with formulas for the 'mu' parameters if (is.mixfamily(family)) { mu_dpars <- paste0("mu", seq_along(family$mix)) for (dp in mu_dpars) { x$pforms[[dp]] <- combine_formulas(formula, x$pforms[[dp]], dp) } x$pforms <- move2start(x$pforms, mu_dpars) } else if (conv_cats_dpars(x$family)) { mu_dpars <- str_subset(x$family$dpars, "^mu") for (dp in mu_dpars) { x$pforms[[dp]] <- combine_formulas(formula, x$pforms[[dp]], dp) } x$pforms <- move2start(x$pforms, mu_dpars) } else { x$pforms[["mu"]] <- combine_formulas(formula, x$pforms[["mu"]], "mu") x$pforms <- move2start(x$pforms, "mu") } # predicted distributional parameters resp <- ifelse(mv && !is.null(y$resp), y$resp, "") dpars <- intersect(names(x$pforms), valid_dpars(family)) dpar_forms <- x$pforms[dpars] nlpars <- setdiff(names(x$pforms), dpars) y$dpars <- named_list(dpars) for (dp in dpars) { if (get_nl(dpar_forms[[dp]])) { y$dpars[[dp]] <- terms_nlf(dpar_forms[[dp]], nlpars, resp) } else { y$dpars[[dp]] <- terms_lf(dpar_forms[[dp]]) } y$dpars[[dp]]$family <- dpar_family(family, dp) y$dpars[[dp]]$dpar <- dp y$dpars[[dp]]$resp <- resp if (dpar_class(dp) == "mu") { y$dpars[[dp]]$respform <- y$respform y$dpars[[dp]]$adforms <- y$adforms } check_cs(y$dpars[[dp]]) } y$nlpars <- named_list(nlpars) if (length(nlpars)) { nlpar_forms <- x$pforms[nlpars] for (nlp in nlpars) { if (is.null(attr(nlpar_forms[[nlp]], "center"))) { # design matrices of non-linear parameters will not be # centered by default to make prior specification easier attr(nlpar_forms[[nlp]], "center") <- FALSE } if (get_nl(nlpar_forms[[nlp]])) { y$nlpars[[nlp]] <- terms_nlf(nlpar_forms[[nlp]], nlpars, resp) } else { y$nlpars[[nlp]] <- terms_lf(nlpar_forms[[nlp]]) } y$nlpars[[nlp]]$nlpar <- nlp y$nlpars[[nlp]]$resp <- resp check_cs(y$nlpars[[nlp]]) } used_nlpars <- ulapply(c(y$dpars, y$nlpars), "[[", "used_nlpars") unused_nlpars <- setdiff(nlpars, used_nlpars) if (length(unused_nlpars)) { stop2( "The parameter '", unused_nlpars[1], "' is not a ", "valid distributional or non-linear parameter. ", "Did you forget to set 'nl = TRUE'?" 
) } # sort non-linear parameters after dependency used_nlpars <- lapply(y$nlpars, "[[", "used_nlpars") sorted_nlpars <- sort_dependencies(used_nlpars) y$nlpars <- y$nlpars[sorted_nlpars] } # fixed distributional parameters valid_dpars <- valid_dpars(y) inv_fixed_dpars <- setdiff(names(x$pfix), valid_dpars) if (length(inv_fixed_dpars)) { stop2("Invalid fixed parameters: ", collapse_comma(inv_fixed_dpars)) } if ("sigma" %in% valid_dpars && no_sigma(y)) { # some models require setting sigma to 0 if ("sigma" %in% c(names(x$pforms), names(x$pfix))) { stop2("Cannot predict or fix 'sigma' in this model.") } x$pfix$sigma <- 0 } if ("nu" %in% valid_dpars && no_nu(y)) { if ("nu" %in% c(names(x$pforms), names(x$pfix))) { stop2("Cannot predict or fix 'nu' in this model.") } x$pfix$nu <- 1 } disc_pars <- valid_dpars[dpar_class(valid_dpars) %in% "disc"] for (dp in disc_pars) { # 'disc' is set to 1 and not estimated by default if (!dp %in% c(names(x$pforms), names(x$pfix))) { x$pfix[[dp]] <- 1 } } for (dp in names(x$pfix)) { y$fdpars[[dp]] <- list(value = x$pfix[[dp]], dpar = dp) } check_fdpars(y$fdpars) # make a formula containing all required variables unused_vars <- all_vars(attr(x$formula, "unused")) lhsvars <- if (resp_rhs_all) all_vars(y$respform) y$allvars <- allvars_formula( lhsvars, advars, lapply(y$dpars, get_allvars), lapply(y$nlpars, get_allvars), y$time$allvars, unused_vars ) if (check_response) { # add y$respform to the left-hand side of y$allvars # avoid using update.formula as it is inefficient for longer formulas formula_allvars <- y$respform formula_allvars[[3]] <- y$allvars[[2]] y$allvars <- formula_allvars } environment(y$allvars) <- environment(formula) y } #' @rdname brmsterms #' @export brmsterms.mvbrmsformula <- function(formula, ...) { x <- validate_formula(formula) x$rescor <- isTRUE(x$rescor) x$mecor <- isTRUE(x$mecor) out <- structure(list(), class = "mvbrmsterms") out$terms <- named_list(names(x$forms)) for (i in seq_along(out$terms)) { x$forms[[i]]$rescor <- x$rescor x$forms[[i]]$mecor <- x$mecor x$forms[[i]]$mv <- TRUE out$terms[[i]] <- brmsterms(x$forms[[i]], ...) 
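    # each element of out$terms is a complete 'brmsterms' object for one response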
} out$allvars <- allvars_formula(lapply(out$terms, get_allvars)) # required to find variables used solely in the response part lhs_resp <- function(x) deparse_combine(lhs(x$respform)[[2]]) out$respform <- paste0(ulapply(out$terms, lhs_resp), collapse = ",") out$respform <- formula(paste0("mvbind(", out$respform, ") ~ 1")) out$responses <- ulapply(out$terms, "[[", "resp") out$rescor <- x$rescor out$mecor <- x$mecor out$cov_ranef <- x$cov_ranef out } # parse linear/additive formulas # @param formula an ordinary model formula # @return a 'btl' object terms_lf <- function(formula) { formula <- rhs(as.formula(formula)) y <- nlist(formula) formula <- terms(formula) check_accidental_helper_functions(formula) types <- setdiff(all_term_types(), excluded_term_types(formula)) for (t in types) { tmp <- do_call(paste0("terms_", t), list(formula)) if (is.data.frame(tmp) || is.formula(tmp)) { y[[t]] <- tmp } } y$allvars <- allvars_formula( get_allvars(y$fe), get_allvars(y$re), get_allvars(y$cs), get_allvars(y$sp), get_allvars(y$sm), get_allvars(y$gp), get_allvars(y$ac), get_allvars(y$offset) ) environment(y$allvars) <- environment(formula) structure(y, class = "btl") } # parse non-linear formulas # @param formula non-linear model formula # @param nlpars names of all non-linear parameters # @param resp optional name of a response variable # @return a 'btnl' object terms_nlf <- function(formula, nlpars, resp = "") { if (!length(nlpars)) { stop2("No non-linear parameters specified.") } loop <- !isFALSE(attr(formula, "loop")) formula <- rhs(as.formula(formula)) y <- nlist(formula) all_vars <- all_vars(formula) y$used_nlpars <- intersect(all_vars, nlpars) covars <- setdiff(all_vars, nlpars) y$covars <- structure(str2formula(covars), int = FALSE) if (!"ac" %in% excluded_term_types(formula)) { y$ac <- terms_ac(attr(formula, "autocor")) } y$allvars <- allvars_formula(covars, get_allvars(y$ac)) environment(y$allvars) <- environment(formula) y$loop <- loop structure(y, class = "btnl") } # extract addition arguments out of formula # @return a list of formulas each containg a single addition term terms_ad <- function(formula, family = NULL, check_response = TRUE) { x <- list() ad_funs <- lsp("brms", what = "exports", pattern = "^resp_") ad_funs <- sub("^resp_", "", ad_funs) families <- family_names(family) if (is.family(family) && any(nzchar(families))) { str_formula <- formula2str(formula) ad <- get_matches("(?<=\\|)[^~]*(?=~)", str_formula, perl = TRUE) valid_ads <- family_info(family, "ad") if (length(ad)) { ad_terms <- terms(str2formula(ad)) if (length(attr(ad_terms, "offset"))) { stop2("Offsets are not allowed in addition terms.") } ad_terms <- attr(ad_terms, "term.labels") for (a in ad_funs) { matches <- grep(paste0("^(resp_)?", a, "\\(.*\\)$"), ad_terms) if (length(matches) == 1L) { x[[a]] <- ad_terms[matches] if (!grepl("^resp_", x[[a]])) { x[[a]] <- paste0("resp_", x[[a]]) } ad_terms <- ad_terms[-matches] if (!is.na(x[[a]]) && a %in% valid_ads) { x[[a]] <- str2formula(x[[a]]) } else { stop2("Argument '", a, "' is not supported for ", "family '", summary(family), "'.") } } else if (length(matches) > 1L) { stop2("Each addition argument may only be defined once.") } } if (length(ad_terms)) { stop2("The following addition terms are invalid:\n", collapse_comma(ad_terms)) } } if (check_response && "wiener" %in% families && !is.formula(x$dec)) { stop2("Addition argument 'dec' is required for family 'wiener'.") } if (is.formula(x$cat)) { # 'cat' was replaced by 'thres' in brms 2.10.5 x$thres <- x$cat } } x } # 
extract fixed effects terms terms_fe <- function(formula) { if (!is.terms(formula)) { formula <- terms(formula) } all_terms <- all_terms(formula) sp_terms <- find_terms(all_terms, "all", complete = FALSE) re_terms <- all_terms[grepl("\\|", all_terms)] int_term <- attr(formula, "intercept") fe_terms <- setdiff(all_terms, c(sp_terms, re_terms)) out <- paste(c(int_term, fe_terms), collapse = "+") out <- str2formula(out) attr(out, "allvars") <- allvars_formula(out) attr(out, "decomp") <- get_decomp(formula) if (has_rsv_intercept(out, has_intercept(formula))) { attr(out, "int") <- FALSE } if (no_cmc(formula)) { attr(out, "cmc") <- FALSE } if (no_center(formula)) { attr(out, "center") <- FALSE } if (is_sparse(formula)) { attr(out, "sparse") <- TRUE } out } # gather information of group-level terms # @return a data.frame with one row per group-level term terms_re <- function(formula) { re_terms <- get_re_terms(formula, brackets = FALSE) if (!length(re_terms)) { return(NULL) } re_terms <- split_re_terms(re_terms) re_parts <- re_parts(re_terms) out <- allvars <- vector("list", length(re_terms)) type <- attr(re_terms, "type") for (i in seq_along(re_terms)) { gcall <- eval2(re_parts$rhs[i]) form <- str2formula(re_parts$lhs[i]) group <- paste0(gcall$type, collapse(gcall$groups)) out[[i]] <- data.frame( group = group, gtype = gcall$type, gn = i, id = gcall$id, type = type[i], cor = gcall$cor, stringsAsFactors = FALSE ) out[[i]]$gcall <- list(gcall) out[[i]]$form <- list(form) # gather all variables used in the group-level term # at this point 'cs' terms are no longer recognized as such ftype <- str_if(type[i] %in% "cs", "", type[i]) re_allvars <- get_allvars(form, type = ftype) allvars[[i]] <- allvars_formula(re_allvars, gcall$allvars) } out <- do_call(rbind, out) out <- out[order(out$group), ] attr(out, "allvars") <- allvars_formula(allvars) if (no_cmc(formula)) { # disabling cell-mean coding in all group-level terms # has to come last to avoid removal of attributes for (i in seq_rows(out)) { attr(out$form[[i]], "cmc") <- FALSE } } out } # extract category specific terms for ordinal models terms_cs <- function(formula) { out <- find_terms(formula, "cs") if (!length(out)) { return(NULL) } out <- ulapply(out, eval2, envir = environment()) out <- str2formula(out) attr(out, "allvars") <- allvars_formula(out) # do not test whether variables were supplied to 'cs' # to allow category specific group-level intercepts attr(out, "int") <- FALSE out } # extract special effects terms terms_sp <- function(formula) { types <- c("mo", "me", "mi") out <- find_terms(formula, types, complete = FALSE) if (!length(out)) { return(NULL) } uni_mo <- trim_wsp(get_matches_expr(regex_sp("mo"), out)) uni_me <- trim_wsp(get_matches_expr(regex_sp("me"), out)) uni_mi <- trim_wsp(get_matches_expr(regex_sp("mi"), out)) # remove the intercept as it is handled separately out <- str2formula(c("0", out)) attr(out, "int") <- FALSE attr(out, "uni_mo") <- uni_mo attr(out, "uni_me") <- uni_me attr(out, "uni_mi") <- uni_mi attr(out, "allvars") <- str2formula(all_vars(out)) # TODO: do we need sp_fake_formula at all? # attr(out, "allvars") <- sp_fake_formula(uni_mo, uni_me, uni_mi) out } # extract spline terms terms_sm <- function(formula) { out <- find_terms(formula, "sm") if (!length(out)) { return(NULL) } if (any(grepl("^(te|ti)\\(", out))) { stop2("Tensor product smooths 'te' and 'ti' are not yet ", "implemented in brms. 
Consider using 't2' instead.") } out <- str2formula(out) attr(out, "allvars") <- mgcv::interpret.gam(out)$fake.formula out } # extract gaussian process terms terms_gp <- function(formula) { out <- find_terms(formula, "gp") if (!length(out)) { return(NULL) } eterms <- lapply(out, eval2, envir = environment()) covars <- lapply(eterms, "[[", "term") byvars <- lapply(eterms, "[[", "by") allvars <- str2formula(unlist(c(covars, byvars))) allvars <- str2formula(all_vars(allvars)) if (!length(all_vars(allvars))) { stop2("No variable supplied to function 'gp'.") } out <- str2formula(out) attr(out, "allvars") <- allvars out } # extract autocorrelation terms terms_ac <- function(formula) { autocor <- attr(formula, "autocor") out <- c(find_terms(formula, "ac"), find_terms(autocor, "ac")) if (!length(out)) { return(NULL) } eterms <- lapply(out, eval2, envir = environment()) allvars <- unlist(c( lapply(eterms, "[[", "time"), lapply(eterms, "[[", "gr") )) allvars <- str2formula(all_vars(allvars)) out <- str2formula(out) attr(out, "allvars") <- allvars out } # extract offset terms terms_offset <- function(formula) { if (!is.terms(formula)) { formula <- terms(as.formula(formula)) } pos <- attr(formula, "offset") if (is.null(pos)) { return(NULL) } vars <- attr(formula, "variables") out <- ulapply(pos, function(i) deparse(vars[[i + 1]])) out <- str2formula(out) attr(out, "allvars") <- str2formula(all_vars(out)) out } # extract multiple covariates in multi-membership terms terms_mmc <- function(formula) { out <- find_terms(formula, "mmc") if (!length(out)) { return(NULL) } # remove the intercept as it is handled separately out <- str2formula(c("0", out)) attr(out, "allvars") <- allvars_formula(out) attr(out, "int") <- FALSE out } # extract response variable names # assumes multiple response variables to be combined via mvbind terms_resp <- function(formula, check_names = TRUE) { formula <- lhs(as.formula(formula)) if (is.null(formula)) { return(NULL) } expr <- validate_resp_formula(formula)[[2]] if (length(expr) <= 1L) { out <- deparse_no_string(expr) } else { str_fun <- deparse_no_string(expr[[1]]) used_mvbind <- grepl("^(brms:::?)?mvbind$", str_fun) if (used_mvbind) { out <- ulapply(expr[-1], deparse_no_string) } else { out <- deparse_no_string(expr) } } if (check_names) { out <- make_stan_names(out) } out } #' Checks if argument is a \code{brmsterms} object #' #' @param x An \R object #' #' @seealso \code{\link[brms:brmsterms]{brmsterms}} #' #' @export is.brmsterms <- function(x) { inherits(x, "brmsterms") } #' Checks if argument is a \code{mvbrmsterms} object #' #' @param x An \R object #' #' @seealso \code{\link[brms:brmsterms]{brmsterms}} #' #' @export is.mvbrmsterms <- function(x) { inherits(x, "mvbrmsterms") } is.btl <- function(x) { inherits(x, "btl") } is.btnl <- function(x) { inherits(x, "btnl") } # transform mvbrmsterms objects for use in stan_llh.brmsterms as.brmsterms <- function(x) { stopifnot(is.mvbrmsterms(x), x$rescor) families <- ulapply(x$terms, function(y) y$family$family) stopifnot(all(families == families[1])) out <- structure(list(), class = "brmsterms") out$family <- structure( list(family = paste0(families[1], "_mv"), link = "identity"), class = c("brmsfamily", "family") ) info <- get(paste0(".family_", families[1]))() out$family[names(info)] <- info out$sigma_pred <- any(ulapply(x$terms, function(x) "sigma" %in% names(x$dpar) || is.formula(x$adforms$se) )) weight_forms <- rmNULL(lapply(x$terms, function(x) x$adforms$weights)) if (length(weight_forms)) { str_wf <- 
unique(ulapply(weight_forms, formula2str)) if (length(str_wf) > 1L) { stop2("All responses should use the same", "weights if 'rescor' is estimated.") } out$adforms$weights <- weight_forms[[1]] } miforms <- rmNULL(lapply(x$terms, function(x) x$adforms$mi)) if (length(miforms)) { out$adforms$mi <- miforms[[1]] } out } # names of supported term types all_term_types <- function() { c("fe", "re", "sp", "cs", "sm", "gp", "ac", "offset") } # avoid ambiguous parameter names # @param names names to check for ambiguity # @param bterms a brmsterms object avoid_dpars <- function(names, bterms) { dpars <- c(names(bterms$dpars), "sp", "cs") if (length(dpars)) { dpars_prefix <- paste0("^", dpars, "_") invalid <- any(ulapply(dpars_prefix, grepl, names)) if (invalid) { dpars <- paste0("'", dpars, "_'", collapse = ", ") stop2("Variable names starting with ", dpars, " are not allowed for this model.") } } invisible(NULL) } vars_prefix <- function() { c("dpar", "resp", "nlpar") } # check and tidy parameter prefixes check_prefix <- function(x, keep_mu = FALSE) { vpx <- vars_prefix() if (is.data.frame(x) && nrow(x) == 0) { # avoids a bug in data.frames with zero rows x <- list() } x[setdiff(vpx, names(x))] <- "" x <- x[vpx] for (i in seq_along(x)) { x[[i]] <- as.character(x[[i]]) if (!length(x[[i]])) { x[[i]] <- "" } x[[i]] <- ifelse( !keep_mu & names(x)[i] == "dpar" & x[[i]] %in% "mu", yes = "", no = x[[i]] ) x[[i]] <- ifelse( keep_mu & names(x)[i] == "dpar" & x[[i]] %in% "", yes = "mu", no = x[[i]] ) } x } # combined parameter prefixes # @param prefix object from which to extract prefixes # @param keep_mu keep the 'mu' prefix if available or remove it? # @param nlp include the 'nlp' prefix for non-linear parameters? combine_prefix <- function(prefix, keep_mu = FALSE, nlp = FALSE) { prefix <- check_prefix(prefix, keep_mu = keep_mu) if (is_nlpar(prefix) && nlp) { prefix$dpar <- "nlp" } prefix <- lapply(prefix, usc) sub("^_", "", do_call(paste0, prefix)) } # check validity of fixed distributional parameters check_fdpars <- function(x) { stopifnot(is.null(x) || is.list(x)) pos_pars <- c( "sigma", "shape", "nu", "phi", "kappa", "beta", "disc", "bs", "ndt", "theta" ) prob_pars <- c("zi", "hu", "bias", "quantile") for (dp in names(x)) { apc <- dpar_class(dp) value <- x[[dp]]$value if (apc %in% pos_pars && value < 0) { stop2("Parameter '", dp, "' must be positive.") } if (apc %in% prob_pars && (value < 0 || value > 1)) { stop2("Parameter '", dp, "' must be between 0 and 1.") } } invisible(TRUE) } # combine all variables in one formuula # @param x (list of) formulas or character strings # @return a formula with all variables on the right-hand side allvars_formula <- function(...) 
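# Illustration: allvars_formula("x + z", ~ log(w)) should return a one-sided
# formula whose right-hand side contains both the original terms and all
# individual variables, roughly ~ x + z + log(w) + x + z + w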
{ out <- rmNULL(c(...)) out <- collapse(ulapply(out, plus_rhs)) all_vars <- all_vars(out) invalid_vars <- setdiff(all_vars, make.names(all_vars)) if (length(invalid_vars)) { stop2("The following variable names are invalid: ", collapse_comma(invalid_vars)) } str2formula(c(out, all_vars)) } # conveniently extract a formula of all relevant variables # @param x any object from which to extract 'allvars' # @param type predictor type; requires a 'parse_' function # @return a formula with all variables on the right-hand side # or NULL if 'allvars' cannot be found get_allvars <- function(x, type = "") { out <- attr(x, "allvars", TRUE) if (is.null(out) && "allvars" %in% names(x)) { out <- x[["allvars"]] } if (is.null(out) && is.formula(x)) { type <- as_one_character(type) type <- str_if(nzchar(type), type, "fe") terms_fun <- get(paste0("terms_", type), mode = "function") out <- attr(terms_fun(x), "allvars") } stopifnot(is.null(out) || is.formula(out)) out } # add 'x' to the right-hand side of a formula plus_rhs <- function(x) { if (is.formula(x)) { x <- sub("^[^~]*~", "", formula2str(x)) } if (length(x) && all(nzchar(x))) { out <- paste0(" + ", paste(x, collapse = "+")) } else { out <- " + 1" } out } # like stats::terms but keeps attributes if possible terms <- function(formula, ...) { old_attributes <- attributes(formula) formula <- stats::terms(formula, ...) new_attributes <- attributes(formula) sel_names <- setdiff(names(old_attributes), names(new_attributes)) attributes(formula)[sel_names] <- old_attributes[sel_names] formula } is.terms <- function(x) { inherits(x, "terms") } # combine formulas for distributional parameters # @param formula1 primary formula from which to take the RHS # @param formula2 secondary formula used to update the RHS of formula1 # @param lhs character string to define the left-hand side of the output # @param update a flag to indicate whether updating should be allowed. # Defaults to FALSE to maintain backwards compatibility # @return a formula object combine_formulas <- function(formula1, formula2, lhs = "", update = FALSE) { stopifnot(is.formula(formula1)) stopifnot(is.null(formula2) || is.formula(formula2)) lhs <- as_one_character(lhs) update <- as_one_logical(update) if (is.null(formula2)) { rhs <- str_rhs(formula1) att <- attributes(formula1) } else if (update && has_terms(formula1)) { # TODO: decide about intuitive updating behavior if (get_nl(formula1) || get_nl(formula2)) { stop2("Cannot combine non-linear formulas.") } old_formula <- eval2(paste0("~ ", str_rhs(formula1))) new_formula <- eval2(paste0("~ . + ", str_rhs(formula2))) rhs <- str_rhs(update(old_formula, new_formula)) att <- attributes(formula1) att[names(attributes(formula2))] <- attributes(formula2) } else { rhs <- str_rhs(formula2) att <- attributes(formula2) } out <- eval2(paste0(lhs, " ~ ", rhs)) attributes(out)[names(att)] <- att out } # does the formula contain any terms? # @return TRUE or FALSE has_terms <- function(formula) { stopifnot(is.formula(formula)) terms <- try(terms(rhs(formula)), silent = TRUE) is(terms, "try-error") || length(attr(terms, "term.labels")) || length(attr(terms, "offset")) } # has a linear formula any terms except overall effects? 
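# e.g. terms created by (1 | g), s(x), gp(x), mo(x), cs(x), ar(), or offset()
# count as special here, whereas plain population-level terms like x or x:z do not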
has_special_terms <- function(x) { if (!is.btl(x)) { return(FALSE) } special_terms <- c("sp", "sm", "gp", "ac", "cs", "offset") NROW(x[["re"]]) > 0 || any(lengths(x[special_terms])) } # indicate if the predictor term belongs to a non-linear parameter is_nlpar <- function(x) { isTRUE(nzchar(x[["nlpar"]])) } # indicate if the intercept should be removed no_int <- function(x) { isFALSE(attr(x, "int", exact = TRUE)) } # indicate if cell mean coding should be disabled no_cmc <- function(x) { isFALSE(attr(x, "cmc", exact = TRUE)) } # indicate if centering of the design matrix should be disabled no_center <- function(x) { isFALSE(attr(x, "center", exact = TRUE)) } # indicate if the design matrix should be handled as sparse is_sparse <- function(x) { isTRUE(attr(x, "sparse", exact = TRUE)) } # get the decomposition type of the design matrix get_decomp <- function(x) { out <- attr(x, "decomp", exact = TRUE) if (is.null(out)) { out <- "none" } as_one_character(out) } # extract different types of effects get_effect <- function(x, ...) { UseMethod("get_effect") } #' @export get_effect.default <- function(x, ...) { NULL } #' @export get_effect.brmsfit <- function(x, ...) { get_effect(x$formula, ...) } #' @export get_effect.brmsformula <- function(x, ...) { get_effect(brmsterms(x), ...) } #' @export get_effect.mvbrmsformula <- function(x, ...) { get_effect(brmsterms(x), ...) } #' @export get_effect.mvbrmsterms <- function(x, ...) { ulapply(x$terms, get_effect, recursive = FALSE, ...) } # extract formulas of a certain effect type # @param target effect type to return # @param all logical; include effects of nlpars and dpars? # @return a list of formulas #' @export get_effect.brmsterms <- function(x, target = "fe", ...) { out <- named_list(c(names(x$dpars), names(x$nlpars))) for (dp in names(x$dpars)) { out[[dp]] <- get_effect(x$dpars[[dp]], target = target) } for (nlp in names(x$nlpars)) { out[[nlp]] <- get_effect(x$nlpars[[nlp]], target = target) } unlist(out, recursive = FALSE) } #' @export get_effect.btl <- function(x, target = "fe", ...) { x[[target]] } #' @export get_effect.btnl <- function(x, target = "fe", ...) { NULL } all_terms <- function(x) { if (!length(x)) { return(character(0)) } if (!is.terms(x)) { x <- terms(as.formula(x)) } trim_wsp(attr(x, "term.labels")) } # generate a regular expression to extract special terms # @param type one or more special term types to be extracted regex_sp <- function(type = "all") { choices <- c("all", "sp", "sm", "gp", "cs", "mmc", "ac", all_sp_types()) type <- unique(match.arg(type, choices, several.ok = TRUE)) funs <- c( sm = "(s|(t2)|(te)|(ti))", gp = "gp", cs = "cse?", mmc = "mmc", ac = "((arma)|(ar)|(ma)|(cosy)|(sar)|(car)|(fcor))" ) funs[all_sp_types()] <- all_sp_types() if ("sp" %in% type) { # allows extracting all 'sp' terms at once type <- setdiff(type, "sp") type <- union(type, all_sp_types()) } if ("all" %in% type) { # allows extracting all special terms at once type <- names(funs) } funs <- funs[type] allow_colon <- c("cs", "mmc", "ac") inner <- ifelse(names(funs) %in% allow_colon, ".*", "[^:]*") out <- paste0("^(", funs, ")\\(", inner, "\\)$") paste0("(", out, ")", collapse = "|") } # find special terms of a certain type # @param x formula object of character vector from which to extract terms # @param type special terms type to be extracted. see regex_sp() # @param complete check if terms consist completely of single special terms? # @param ranef include group-level terms? 
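# e.g. find_terms(y ~ x + s(z) + (1 | g), type = "sm") is expected to return
#   "s(z)", since group-level terms are excluded unless 'ranef' is TRUE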
# @return a character vector of matching terms find_terms <- function(x, type, complete = TRUE, ranef = FALSE) { if (is.formula(x)) { x <- all_terms(x) } else { x <- as.character(x) } complete <- as_one_logical(complete) ranef <- as_one_logical(ranef) regex <- regex_sp(type) is_match <- grepl_expr(regex, x) if (!ranef) { is_match <- is_match & !grepl("\\|", x) } out <- x[is_match] if (complete) { matches <- lapply(out, get_matches_expr, pattern = regex) # each term may contain only one special function call inv <- out[lengths(matches) > 1L] if (!length(inv)) { # each term must be exactly equal to the special function call inv <- out[trim_wsp(unlist(matches)) != out] } if (length(inv)) { stop2("The term '", inv[1], "' is invalid in brms syntax.") } } out } # validate a terms object (or one that can be coerced to it) # for use primarily in 'get_model_matrix' # @param x any R object # @return a (possibly amended) terms object or NULL # if 'x' could not be coerced to a terms object validate_terms <- function(x) { no_int <- no_int(x) no_cmc <- no_cmc(x) if (is.formula(x) && !is.terms(x)) { x <- terms(x) } if (!is.terms(x)) { return(NULL) } if (no_int || !has_intercept(x) && no_cmc) { # allows to remove the intercept without causing cell mean coding attr(x, "intercept") <- 1 attr(x, "int") <- FALSE } x } # checks if the formula contains an intercept has_intercept <- function(formula) { if (is.terms(formula)) { out <- as.logical(attr(formula, "intercept")) } else { formula <- as.formula(formula) try_terms <- try(terms(formula), silent = TRUE) if (is(try_terms, "try-error")) { out <- FALSE } else { out <- as.logical(attr(try_terms, "intercept")) } } out } # check if model makes use of the reserved intercept variables # @param has_intercept does the model have an intercept? # if NULL this will be inferred from formula itself has_rsv_intercept <- function(formula, has_intercept = NULL) { .has_rsv_intercept <- function(terms, has_intercept) { has_intercept <- as_one_logical(has_intercept) intercepts <- c("intercept", "Intercept") out <- !has_intercept && any(intercepts %in% all_vars(rhs(terms))) return(out) } if (is.terms(formula)) { if (is.null(has_intercept)) { has_intercept <- has_intercept(formula) } return(.has_rsv_intercept(formula, has_intercept)) } formula <- try(as.formula(formula), silent = TRUE) if (is(formula, "try-error")) { return(FALSE) } if (is.null(has_intercept)) { try_terms <- try(terms(formula), silent = TRUE) if (is(try_terms, "try-error")) { return(FALSE) } has_intercept <- has_intercept(try_terms) } .has_rsv_intercept(formula, has_intercept) } # names of reserved variables rsv_vars <- function(bterms) { stopifnot(is.brmsterms(bterms) || is.mvbrmsterms(bterms)) .rsv_vars <- function(x) { rsv_int <- any(ulapply(x$dpars, has_rsv_intercept)) if (rsv_int) c("intercept", "Intercept") else NULL } if (is.mvbrmsterms(bterms)) { out <- unique(ulapply(bterms$terms, .rsv_vars)) } else { out <- .rsv_vars(bterms) } out } # are category specific effects present? 
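# i.e., are there 'cs' terms in any linear predictor, or group-level effects
# with type 'cs'?
# (illustrative sketch, not run) e.g. an ordinal-model formula such as
#   y ~ cs(x) + (cs(x) | g)
# would be expected to make this return TRUE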
has_cs <- function(bterms) { length(get_effect(bterms, target = "cs")) > 0L || any(get_re(bterms)$type %in% "cs") } # check if category specific effects are allowed check_cs <- function(bterms) { stopifnot(is.btl(bterms) || is.btnl(bterms)) if (has_cs(bterms)) { if (!is_equal(dpar_class(bterms$dpar), "mu")) { stop2("Category specific effects are only supported ", "for the main parameter 'mu'.") } if (!(is.null(bterms$family) || allow_cs(bterms$family))) { stop2("Category specific effects are not supported for this family.") } if (needs_ordered_cs(bterms$family)) { warning2("Category specific effects for this family should be ", "considered experimental and may have convergence issues.") } } invisible(NULL) } # check for the presence of helper functions accidentally used # within a formula instead of added to bf(). See #1103 check_accidental_helper_functions <- function(formula) { terms <- all_terms(formula) # see help("brmsformula-helpers") for the list of functions funs <- c("nlf", "lf", "acformula", "set_nl", "set_rescor", "set_mecor") regex <- paste0("(", funs, ")", collapse = "|") regex <- paste0("^(", regex, ")\\(") matches <- get_matches(regex, terms, first = TRUE) matches <- sub("\\($", "", matches) matches <- unique(matches) matches <- matches[nzchar(matches)] for (m in matches) { loc <- utils::find(m, mode = "function") if (is_equal(loc[1], "package:brms")) { stop2("Function '", m, "' should not be part of the right-hand side ", "of a formula. See help('brmsformula-helpers') for the correct syntax.") } } invisible(TRUE) } # extract elements from objects # @param x object from which to extract elements # @param name name of the element to be extracted get_element <- function(x, name, ...) { UseMethod("get_element") } #' @export get_element.default <- function(x, name, ...) { x[[name]] } #' @export get_element.mvbrmsformula <- function(x, name, ...) { lapply(x$forms, get_element, name = name, ...) } #' @export get_element.mvbrmsterms <- function(x, name, ...) { lapply(x$terms, get_element, name = name, ...) } brms/R/loo_predict.R0000644000175000017500000002102114111751666014202 0ustar nileshnilesh#' Compute Weighted Expectations Using LOO #' #' These functions are wrappers around the \code{\link[loo]{E_loo}} #' function of the \pkg{loo} package. #' #' @aliases loo_predict loo_linpred loo_predictive_interval #' #' @param object An object of class \code{brmsfit}. #' @param type The statistic to be computed on the results. #' Can by either \code{"mean"} (default), \code{"var"}, or #' \code{"quantile"}. #' @param probs A vector of quantiles to compute. #' Only used if \code{type = quantile}. #' @param prob For \code{loo_predictive_interval}, a scalar in \eqn{(0,1)} #' indicating the desired probability mass to include in the intervals. The #' default is \code{prob = 0.9} (\eqn{90}\% intervals). #' @param psis_object An optional object returned by \code{\link[loo]{psis}}. #' If \code{psis_object} is missing then \code{\link[loo]{psis}} is executed #' internally, which may be time consuming for models fit to very large datasets. #' @param ... Optional arguments passed to the underlying methods that is #' \code{\link[brms:log_lik.brmsfit]{log_lik}}, as well as #' \code{\link[brms:posterior_predict.brmsfit]{posterior_predict}} or #' \code{\link[brms:posterior_linpred.brmsfit]{posterior_linpred}}. #' @inheritParams posterior_predict.brmsfit #' #' @return \code{loo_predict} and \code{loo_linpred} return a vector with one #' element per observation. 
The only exception is if \code{type = "quantile"} #' and \code{length(probs) >= 2}, in which case a separate vector for each #' element of \code{probs} is computed and they are returned in a matrix with #' \code{length(probs)} rows and one column per observation. #' #' \code{loo_predictive_interval} returns a matrix with one row per #' observation and two columns. #' \code{loo_predictive_interval(..., prob = p)} is equivalent to #' \code{loo_predict(..., type = "quantile", probs = c(a, 1-a))} with #' \code{a = (1 - p)/2}, except it transposes the result and adds informative #' column names. #' #' @examples #' \dontrun{ #' ## data from help("lm") #' ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) #' trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) #' d <- data.frame( #' weight = c(ctl, trt), #' group = gl(2, 10, 20, labels = c("Ctl", "Trt")) #' ) #' fit <- brm(weight ~ group, data = d) #' loo_predictive_interval(fit, prob = 0.8) #' #' ## optionally log-weights can be pre-computed and reused #' psis <- loo::psis(-log_lik(fit), cores = 2) #' loo_predictive_interval(fit, prob = 0.8, psis_object = psis) #' loo_predict(fit, type = "var", psis_object = psis) #' } #' #' @method loo_predict brmsfit #' @importFrom rstantools loo_predict #' @export loo_predict #' @export loo_predict.brmsfit <- function(object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ...) { type <- match.arg(type) stopifnot_resp(object, resp) if (is.null(psis_object)) { message("Running PSIS to compute weights") psis_object <- compute_loo(object, criterion = "psis", resp = resp, ...) } preds <- posterior_predict(object, resp = resp, ...) loo::E_loo(preds, psis_object, type = type, probs = probs)$value } #' @rdname loo_predict.brmsfit #' @method loo_linpred brmsfit #' @importFrom rstantools loo_linpred #' @export loo_linpred #' @export loo_linpred.brmsfit <- function(object, type = c("mean", "var", "quantile"), probs = 0.5, psis_object = NULL, resp = NULL, ...) { type <- match.arg(type) stopifnot_resp(object, resp) family <- family(object, resp = resp) if (is_ordinal(family) || is_categorical(family)) { stop2("Method 'loo_linpred' is not implemented ", "for categorical or ordinal models") } if (is.null(psis_object)) { message("Running PSIS to compute weights") psis_object <- compute_loo(object, criterion = "psis", resp = resp, ...) } preds <- posterior_linpred(object, resp = resp, ...) loo::E_loo(preds, psis_object, type = type, probs = probs)$value } #' @rdname loo_predict.brmsfit #' @method loo_predictive_interval brmsfit #' @importFrom rstantools loo_predictive_interval #' @export loo_predictive_interval #' @export loo_predictive_interval.brmsfit <- function(object, prob = 0.9, psis_object = NULL, ...) { if (length(prob) != 1L) { stop2("Argument 'prob' should be of length 1.") } alpha <- (1 - prob) / 2 probs <- c(alpha, 1 - alpha) labs <- paste0(100 * probs, "%") intervals <- loo_predict( object, type = "quantile", probs = probs, psis_object = psis_object, ... ) rownames(intervals) <- labs t(intervals) } #' Compute a LOO-adjusted R-squared for regression models #' #' @aliases loo_R2 #' #' @inheritParams bayes_R2.brmsfit #' @param ... Further arguments passed to #' \code{\link[brms:posterior_epred.brmsfit]{posterior_epred}} and #' \code{\link[brms:log_lik.brmsfit]{log_lik}}, #' which are used in the computation of the R-squared values. 
#' #' @return If \code{summary = TRUE}, an M x C matrix is returned #' (M = number of response variables and c = \code{length(probs) + 2}) #' containing summary statistics of the LOO-adjusted R-squared values. #' If \code{summary = FALSE}, the posterior draws of the LOO-adjusted #' R-squared values are returned in an S x M matrix (S is the number of draws). #' #' @examples #' \dontrun{ #' fit <- brm(mpg ~ wt + cyl, data = mtcars) #' summary(fit) #' loo_R2(fit) #' #' # compute R2 with new data #' nd <- data.frame(mpg = c(10, 20, 30), wt = c(4, 3, 2), cyl = c(8, 6, 4)) #' loo_R2(fit, newdata = nd) #' } #' #' @method loo_R2 brmsfit #' @importFrom rstantools loo_R2 #' @export loo_R2 #' @export loo_R2.brmsfit <- function(object, resp = NULL, summary = TRUE, robust = FALSE, probs = c(0.025, 0.975), ...) { contains_draws(object) object <- restructure(object) resp <- validate_resp(resp, object) summary <- as_one_logical(summary) # check for precomputed values R2 <- get_criterion(object, "loo_R2") if (is.matrix(R2)) { # assumes unsummarized 'loo_R2' as ensured by 'add_criterion' take <- colnames(R2) %in% paste0("R2", resp) R2 <- R2[, take, drop = FALSE] if (summary) { R2 <- posterior_summary(R2, probs = probs, robust = robust) } return(R2) } family <- family(object, resp = resp) if (conv_cats_dpars(family)) { stop2("'loo_R2' is not defined for unordered categorical models.") } if (is_ordinal(family)) { warning2( "Predictions are treated as continuous variables in ", "'loo_R2' which is likely invalid for ordinal families." ) } args_y <- list(object, warn = TRUE, ...) args_ypred <- list(object, sort = TRUE, ...) R2 <- named_list(paste0("R2", resp)) for (i in seq_along(R2)) { # assumes expectations of different responses to be independent args_ypred$resp <- args_y$resp <- resp[i] y <- do_call(get_y, args_y) ypred <- do_call(posterior_epred, args_ypred) ll <- do_call(log_lik, args_ypred) r_eff <- r_eff_log_lik(ll, object) if (is_ordinal(family(object, resp = resp[i]))) { ypred <- ordinal_probs_continuous(ypred) } R2[[i]] <- .loo_R2(y, ypred, ll, r_eff) } R2 <- do_call(cbind, R2) colnames(R2) <- paste0("R2", resp) if (summary) { R2 <- posterior_summary(R2, probs = probs, robust = robust) } R2 } # internal function of loo_R2.brmsfit # see http://discourse.mc-stan.org/t/stan-summary-r2-or-adjusted-r2/4308/4 # and https://github.com/stan-dev/rstanarm/blob/master/R/bayes_R2.R .loo_R2 <- function(y, ypred, ll, r_eff) { psis_object <- loo::psis(log_ratios = -ll, r_eff = r_eff) ypredloo <- loo::E_loo(ypred, psis_object, log_ratios = -ll)$value err_loo <- ypredloo - y # simulated dirichlet weights S <- nrow(ypred) N <- ncol(ypred) exp_draws <- matrix(rexp(S * N, rate = 1), nrow = S, ncol = N) weights <- exp_draws / rowSums(exp_draws) var_y <- (N / (N - 1)) * (rowSums(sweep(weights, 2, y^2, FUN = "*")) - rowSums(sweep(weights, 2, y, FUN = "*"))^2) var_err_loo <- (N / (N - 1)) * (rowSums(sweep(weights, 2, err_loo^2, FUN = "*")) - rowSums(sweep(weights, 2, err_loo, FUN = "*")^2)) out <- unname(1 - var_err_loo / var_y) out[out < -1] <- -1 out[out > 1] <- 1 as.matrix(out) } brms/R/stan-prior.R0000644000175000017500000005657714135236374014025 0ustar nileshnilesh# unless otherwise specified, functions return a single character # string defining the likelihood of the model in Stan language # Define priors for parameters in Stan language # @param prior an object of class 'brmsprior' # @param class the parameter class # @param coef the coefficients of this class # @param group the name of a grouping factor # @param type 
Stan type used in the definition of the parameter # if type is empty the parameter is not initialized inside 'stan_prior' # @param dim stan array dimension to be specified after the parameter name # cannot be merged with 'suffix' as the latter should apply to # individual coefficients while 'dim' should not # TODO: decide whether to support arrays for parameters at all # an alternative would be to specify elements directly as parameters # @param coef_type Stan type used in the definition of individual parameter # coefficients; only relevant when mixing estimated and fixed coefficients # @param prefix a prefix to put at the parameter class # @param suffix a suffix to put at the parameter class # @param broadcast Stan type to which the prior should be broadcasted # in order to handle vectorized prior statements # supported values are 'vector' or 'matrix' # @param comment character string containing a comment for the parameter # @param px list or data.frame after which to subset 'prior' # @return a named list of character strings in Stan language stan_prior <- function(prior, class, coef = NULL, group = NULL, type = "real", dim = "", coef_type = "real", prefix = "", suffix = "", broadcast = "vector", header_type = "", comment = "", px = list(), normalize = TRUE) { prior_only <- isTRUE(attr(prior, "sample_prior") == "only") prior <- subset2( prior, class = class, coef = c(coef, ""), group = c(group, ""), ls = px ) # special priors cannot be passed literally to Stan is_special_prior <- is_special_prior(prior$prior) if (any(is_special_prior)) { special_prior <- prior$prior[is_special_prior] stop2("Prior ", collapse_comma(special_prior), " is used in an invalid ", "context. See ?set_prior for details on how to use special priors.") } px <- as.data.frame(px, stringsAsFactors = FALSE) upx <- unique(px) if (nrow(upx) > 1L) { # TODO: find a better solution to handle this case # can only happen for SD parameters of the same ID base_prior <- rep(NA, nrow(upx)) for (i in seq_rows(upx)) { sub_upx <- lapply(upx[i, ], function(x) c(x, "")) sub_prior <- subset2(prior, ls = sub_upx) base_prior[i] <- stan_base_prior(sub_prior) } if (length(unique(base_prior)) > 1L) { # define prior for single coefficients manually # as there is not single base_prior anymore take_coef_prior <- nzchar(prior$coef) & !nzchar(prior$prior) prior_of_coefs <- prior[take_coef_prior, vars_prefix()] take_base_prior <- match_rows(prior_of_coefs, upx) prior$prior[take_coef_prior] <- base_prior[take_base_prior] } base_prior <- base_prior[1] bound <- "" } else { base_prior <- stan_base_prior(prior) bound <- prior[!nzchar(prior$coef), "bound"] } # generate stan prior statements out <- list() par <- paste0(prefix, class, suffix) has_constant_priors <- FALSE has_coef_prior <- any(with(prior, nzchar(coef) & nzchar(prior))) if (has_coef_prior || nzchar(dim) && length(coef)) { # priors on individual coefficients are also individually set # priors are always set on individual coefficients for arrays index_two_dims <- is.matrix(coef) coef <- as.matrix(coef) prior <- subset2(prior, coef = coef) estimated_coef_indices <- list() used_base_prior <- FALSE for (i in seq_rows(coef)) { for (j in seq_cols(coef)) { index <- i if (index_two_dims) { c(index) <- j } prior_ij <- subset2(prior, coef = coef[i, j]) if (NROW(px) > 1L) { # disambiguate priors of coefficients with the same name # coming from different model components stopifnot(NROW(px) == NROW(coef)) prior_ij <- subset2(prior_ij, ls = px[i, ]) } # zero rows can happen if only global priors present 
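# note: 'prior_ij' now holds at most one brmsprior row for this coefficient;
# an empty 'prior' string below implies falling back to the base prior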
stopifnot(nrow(prior_ij) <= 1L) coef_prior <- prior_ij$prior if (!isTRUE(nzchar(coef_prior))) { used_base_prior <- TRUE coef_prior <- base_prior } if (!stan_is_constant_prior(coef_prior)) { # all parameters with non-constant priors are estimated c(estimated_coef_indices) <- list(index) } if (nzchar(coef_prior)) { # implies a proper prior or constant if (type == coef_type && !nzchar(dim)) { # the single coefficient of that parameter equals the parameter stopifnot(all(index == 1L)) par_ij <- par } else { par_ij <- paste0(par, collapse("[", index, "]")) } if (stan_is_constant_prior(coef_prior)) { coef_prior <- stan_constant_prior( coef_prior, par_ij, broadcast = broadcast ) str_add(out$tpar_prior) <- paste0(coef_prior, ";\n") } else { coef_prior <- stan_target_prior( coef_prior, par_ij, broadcast = broadcast, bound = bound, resp = px$resp[1], normalize = normalize ) str_add(out$prior) <- paste0(tp(), coef_prior, ";\n") } } } } # the base prior may be improper flat in which no Stan code is added # but we still have estimated coefficients if the base prior is used has_estimated_priors <- isTRUE(nzchar(out$prior)) || used_base_prior && !stan_is_constant_prior(base_prior) has_constant_priors <- isTRUE(nzchar(out$tpar_prior)) if (has_estimated_priors && has_constant_priors) { # need to mix definition in the parameters and transformed parameters block if (!nzchar(coef_type)) { stop2("Can either estimate or fix all values of parameter '", par, "'.") } for (i in seq_along(estimated_coef_indices)) { index <- estimated_coef_indices[[i]] iu <- paste0(index, collapse = "_") str_add(out$par) <- glue( " {coef_type} par_{par}_{iu};\n" ) ib <- collapse("[", index, "]") str_add(out$tpar_prior) <- cglue( " {par}{ib} = par_{par}_{iu};\n" ) } } } else if (nzchar(base_prior)) { # only a global prior is present and will be broadcasted ncoef <- length(coef) has_constant_priors <- stan_is_constant_prior(base_prior) if (has_constant_priors) { constant_base_prior <- stan_constant_prior( base_prior, par = par, ncoef = ncoef, broadcast = broadcast ) str_add(out$tpar_prior) <- paste0(constant_base_prior, ";\n") } else { target_base_prior <- stan_target_prior( base_prior, par = par, ncoef = ncoef, bound = bound, broadcast = broadcast, resp = px$resp[1], normalize = normalize ) str_add(out$prior) <- paste0(tp(), target_base_prior, ";\n") } } if (nzchar(type)) { # only define the parameter here if type is non-empty comment <- stan_comment(comment) par_definition <- glue(" {type} {par}{dim};{comment}\n") if (has_constant_priors) { # parameter must be defined in the transformed parameters block str_add(out$tpar_def) <- par_definition } else { # parameter can be defined in the parameters block str_add(out$par) <- par_definition } if (nzchar(header_type)) { str_add(out$pll_args) <- glue(", {header_type} {par}") } } else { if (has_constant_priors) { stop2("Cannot fix parameter '", par, "' in this model.") } } has_improper_prior <- !is.null(out$par) && is.null(out$prior) if (prior_only && has_improper_prior) { stop2("Sampling from priors is not possible as ", "some parameters have no proper priors. 
", "Error occurred for parameter '", par, "'.") } out } # get the base prior for all coefficients # this is the lowest level non-coefficient prior # @param prior a brmsprior object # @return a character string defining the base prior stan_base_prior <- function(prior) { stopifnot(length(unique(prior$class)) <= 1) take <- with(prior, !nzchar(coef) & nzchar(prior)) prior <- prior[take, ] if (!NROW(prior)) { return("") } vars <- c("group", "nlpar", "dpar", "resp", "class") for (v in vars) { take <- nzchar(prior[[v]]) if (any(take)) { prior <- prior[take, ] } } stopifnot(NROW(prior) == 1) prior$prior } # Stan prior in target += notation # @param prior character string defining the prior # @param par name of the parameter on which to set the prior # @param ncoef number of coefficients in the parameter # @param bound bounds of the parameter in Stan language # @param broadcast Stan type to which the prior should be broadcasted # @param name of the response variable # @return a character string defining the prior in Stan language stan_target_prior <- function(prior, par, ncoef = 0, broadcast = "vector", bound = "", resp = "", normalize = TRUE) { prior <- gsub("[[:space:]]+\\(", "(", prior) prior_name <- get_matches( "^[^\\(]+(?=\\()", prior, perl = TRUE, simplify = FALSE ) for (i in seq_along(prior_name)) { if (length(prior_name[[i]]) != 1L) { stop2("The prior '", prior[i], "' is invalid.") } } prior_name <- unlist(prior_name) prior_args <- rep(NA, length(prior)) for (i in seq_along(prior)) { prior_args[i] <- sub(glue("^{prior_name[i]}\\("), "", prior[i]) prior_args[i] <- sub(")$", "", prior_args[i]) } if (broadcast == "matrix" && ncoef > 0) { # apply a scalar prior to all elements of a matrix par <- glue("to_vector({par})") } if (nzchar(prior_args)) { str_add(prior_args, start = TRUE) <- " | " } lpdf <- stan_lpdf_name(normalize) out <- glue("{prior_name}_{lpdf}({par}{prior_args})") par_class <- unique(get_matches("^[^_]+", par)) par_bound <- par_bounds(par_class, bound, resp = resp) prior_bound <- prior_bounds(prior_name) trunc_lb <- is.character(par_bound$lb) || par_bound$lb > prior_bound$lb trunc_ub <- is.character(par_bound$ub) || par_bound$ub < prior_bound$ub if (normalize) { # obtain correct normalization constants for truncated priors if (trunc_lb || trunc_ub) { wsp <- wsp(nsp = 4) # scalar parameters are of length 1 but have no coefficients ncoef <- max(1, ncoef) if (trunc_lb && !trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * {prior_name}_lccdf({par_bound$lb}{prior_args})" ) } else if (!trunc_lb && trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * {prior_name}_lcdf({par_bound$ub}{prior_args})" ) } else if (trunc_lb && trunc_ub) { str_add(out) <- glue( "\n{wsp}- {ncoef} * log_diff_exp(", "{prior_name}_lcdf({par_bound$ub}{prior_args}), ", "{prior_name}_lcdf({par_bound$lb}{prior_args}))" ) } } } out } # fix parameters to constants in Stan language # @param prior character string defining the prior # @param par name of the parameter on which to set the prior # @param ncoef number of coefficients in the parameter # @param broadcast Stan type to which the prior should be broadcasted # @return a character string defining the prior in Stan language stan_constant_prior <- function(prior, par, ncoef = 0, broadcast = "vector") { stopifnot(grepl("^constant\\(", prior)) prior_args <- gsub("(^constant\\()|(\\)$)", "", prior) if (broadcast == "vector") { if (ncoef > 0) { # broadcast the scalar prior on the whole parameter vector prior_args <- glue("rep_vector({prior_args}, rows({par}))") } # 
no action required for individual coefficients of vectors } else if (broadcast == "matrix") { if (ncoef > 0) { # broadcast the scalar prior on the whole parameter matrix prior_args <- glue("rep_matrix({prior_args}, rows({par}), cols({par}))") } else { # single coefficient is a row in the parameter matrix prior_args <- glue("rep_row_vector({prior_args}, cols({par}))") } } glue(" {par} = {prior_args}") } # Stan code for global parameters of special priors # currently implemented are horseshoe and lasso stan_special_prior_global <- function(bterms, data, prior, normalize, ...) { out <- list() tp <- tp() lpdf <- stan_lpdf_name(normalize) px <- check_prefix(bterms) p <- usc(combine_prefix(px)) special <- get_special_prior(prior, px) if (!is.null(special$horseshoe)) { str_add(out$data) <- glue( " // data for the horseshoe prior\n", " real hs_df{p}; // local degrees of freedom\n", " real hs_df_global{p}; // global degrees of freedom\n", " real hs_df_slab{p}; // slab degrees of freedom\n", " real hs_scale_global{p}; // global prior scale\n", " real hs_scale_slab{p}; // slab prior scale\n" ) str_add(out$par) <- glue( " // horseshoe shrinkage parameters\n", " real hs_global{p}; // global shrinkage parameters\n", " real hs_slab{p}; // slab regularization parameter\n" ) hs_scale_global <- glue("hs_scale_global{p}") if (isTRUE(special$horseshoe$autoscale)) { str_add(hs_scale_global) <- glue(" * sigma{usc(px$resp)}") } str_add(out$prior) <- glue( "{tp}student_t_{lpdf}(hs_global{p} | hs_df_global{p}, 0, {hs_scale_global})", str_if(normalize, "\n - 1 * log(0.5)"), ";\n", "{tp}inv_gamma_{lpdf}(hs_slab{p} | 0.5 * hs_df_slab{p}, 0.5 * hs_df_slab{p});\n" ) } if (!is.null(special$R2D2)) { str_add(out$data) <- glue( " // data for the R2D2 prior\n", " real R2D2_mean_R2{p}; // mean of the R2 prior\n", " real R2D2_prec_R2{p}; // precision of the R2 prior\n" ) str_add(out$par) <- glue( " // R2D2 shrinkage parameters\n", " real R2D2_R2{p}; // R2 parameter\n" ) if (isTRUE(special$R2D2$autoscale)) { var_mult <- glue("sigma{usc(px$resp)}^2 * ") } str_add(out$tpar_def) <- glue( " real R2D2_tau2{p}; // global R2D2 scale parameter\n" ) str_add(out$tpar_comp) <- glue( " R2D2_tau2{p} = {var_mult}R2D2_R2{p} / (1 - R2D2_R2{p});\n" ) str_add(out$prior) <- glue( "{tp}beta_{lpdf}(R2D2_R2{p} | R2D2_mean_R2{p} * R2D2_prec_R2{p}, ", "(1 - R2D2_mean_R2{p}) * R2D2_prec_R2{p});\n" ) } if (!is.null(special$lasso)) { str_add(out$data) <- glue( " // data for the lasso prior\n", " real lasso_df{p}; // prior degrees of freedom\n", " real lasso_scale{p}; // prior scale\n" ) str_add(out$par) <- glue( " // lasso shrinkage parameter\n", " real lasso_inv_lambda{p};\n" ) str_add(out$prior) <- glue( "{tp}chi_square_{lpdf}(lasso_inv_lambda{p} | lasso_df{p});\n" ) } out } # Stan code for local parameters of special priors # currently implemented are 'horseshoe' # @param class name of the parameter class # @param prior a brmsprior object # @param ncoef number of coefficients in the parameter # @param px named list to subset 'prior' # @param center_X is the design matrix centered? 
# @param suffix optional suffix of the 'b' coefficient vector stan_special_prior_local <- function(prior, class, ncoef, px, center_X = FALSE, suffix = "", normalize = TRUE) { class <- as_one_character(class) stopifnot(class %in% c("b", "bsp")) out <- list() lpdf <- stan_lpdf_name(normalize) p <- usc(combine_prefix(px)) sp <- paste0(sub("^b", "", class), p) ct <- str_if(center_X, "c") tp <- tp() special <- get_special_prior(prior, px) if (!is.null(special$horseshoe)) { str_add(out$par) <- glue( " // local parameters for horseshoe prior\n", " vector[K{ct}{sp}] zb{sp};\n", " vector[K{ct}{sp}] hs_local{sp};\n" ) hs_args <- sargs( glue("zb{sp}"), glue("hs_local{sp}"), glue("hs_global{p}"), glue("hs_scale_slab{p}^2 * hs_slab{p}") ) str_add(out$tpar_reg_prior) <- glue( " // compute actual regression coefficients\n", " b{suffix}{sp} = horseshoe({hs_args});\n" ) str_add(out$prior) <- glue( "{tp}std_normal_{lpdf}(zb{sp});\n", "{tp}student_t_{lpdf}(hs_local{sp} | hs_df{p}, 0, 1)", str_if(normalize, "\n - rows(hs_local{sp}) * log(0.5)"), ";\n" ) } if (!is.null(special$R2D2)) { if (class != "b") { stop2("The R2D2 prior does not yet support special coefficient classes.") } m1 <- str_if(center_X, " -1") str_add(out$data) <- glue( " // concentration vector of the D2 prior\n", " vector[K{sp}{m1}] R2D2_cons_D2{sp};\n" ) str_add(out$par) <- glue( " // local parameters for the R2D2 prior\n", " vector[K{ct}{sp}] zb{sp};\n", " simplex[K{ct}{sp}] R2D2_phi{sp};\n" ) R2D2_args <- sargs( glue("zb{sp}"), glue("R2D2_phi{sp}"), glue("R2D2_tau2{p}") ) str_add(out$tpar_reg_prior) <- glue( " // compute actual regression coefficients\n", " b{suffix}{sp} = R2D2({R2D2_args});\n" ) str_add(out$prior) <- glue( "{tp}std_normal_{lpdf}(zb{sp});\n", "{tp}dirichlet_{lpdf}(R2D2_phi{sp} | R2D2_cons_D2{p});\n" ) } out } # combine unchecked priors for use in Stan # @param prior a brmsprior object # @return a single character string in Stan language stan_unchecked_prior <- function(prior) { stopifnot(is.brmsprior(prior)) if (all(nzchar(prior$class))) { return("") } prior <- subset2(prior, class = "") collapse(" ", prior$prior, ";\n") } # Stan code to sample separately from priors # @param prior character string taken from stan_prior # @param par_declars the parameters block of the Stan code # required to extract boundaries # @param gen_quantities Stan code from the generated quantities block # @param prior_special a list of values pertaining to special priors # such as horseshoe or lasso # @param sample_prior take draws from priors? 
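# @return a named list with Stan code for the generated quantities block
#   (elements 'gen_def' and 'gen_comp'), or an empty list if no prior draws
#   are requested
# rough sketch of the parsing done below (illustrative only): a prior line like
#   "target += normal_lpdf(b[1] | 0, 5)"
# is reduced to par = "b_1", dist = "normal", args = "0,5", from which
#   real prior_b_1 = normal_rng(0,5);
# is generated, with rejection sampling added for bounded parameters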
stan_rngprior <- function(prior, par_declars, gen_quantities, prior_special, sample_prior = "yes") { if (!is_equal(sample_prior, "yes")) { return(list()) } prior <- strsplit(gsub(" |\\n", "", prior), ";")[[1]] # D will contain all relevant information about the priors D <- data.frame(prior = prior[nzchar(prior)]) pars_regex <- "(?<=(_lpdf\\())[^|]+" D$par <- get_matches(pars_regex, D$prior, perl = TRUE, first = TRUE) # 'std_normal' has no '|' and thus the above regex matches too much np <- !grepl("\\|", D$prior) np_regex <- ".+(?=\\)$)" D$par[np] <- get_matches(np_regex, D$par[np], perl = TRUE, first = TRUE) # 'to_vector' should be removed from the parameter names has_tv <- grepl("^to_vector\\(", D$par) tv_regex <- "(^to_vector\\()|(\\)(?=((\\[[[:digit:]]+\\])?)$))" D$par[has_tv] <- gsub(tv_regex, "", D$par[has_tv], perl = TRUE) # do not sample from some auxiliary parameters excl_regex <- c("z", "zs", "zb", "zgp", "Xn", "Y", "hs", "tmp") excl_regex <- paste0("(", excl_regex, ")", collapse = "|") excl_regex <- paste0("^(", excl_regex, ")(_|$)") D <- D[!grepl(excl_regex, D$par), ] if (!NROW(D)) return(list()) # rename parameters containing indices has_ind <- grepl("\\[[[:digit:]]+\\]", D$par) D$par[has_ind] <- ulapply(D$par[has_ind], function(par) { ind_regex <- "(?<=\\[)[[:digit:]]+(?=\\])" ind <- get_matches(ind_regex, par, perl = TRUE) gsub("\\[[[:digit:]]+\\]", paste0("_", ind), par) }) # cannot handle priors on variable transformations D <- D[D$par %in% stan_all_vars(D$par), ] if (!NROW(D)) return(list()) class_old <- c("^L_", "^Lrescor") class_new <- c("cor_", "rescor") D$par <- rename(D$par, class_old, class_new, fixed = FALSE) dis_regex <- "(?<=target\\+=)[^\\(]+(?=_lpdf\\()" D$dist <- get_matches(dis_regex, D$prior, perl = TRUE, first = TRUE) D$dist <- sub("corr_cholesky$", "corr", D$dist) args_regex <- "(?<=\\|)[^$\\|]+(?=\\)($|-))" D$args <- get_matches(args_regex, D$prior, perl = TRUE, first = TRUE) # 'std_normal_rng' does not exist in Stan has_std_normal <- D$dist == "std_normal" D$dist[has_std_normal] <- "normal" D$args[has_std_normal] <- "0,1" # extract information from the initial parameter definition par_declars <- unlist(strsplit(par_declars, "\n", fixed = TRUE)) par_declars <- gsub("^[[:blank:]]*", "", par_declars) par_declars <- par_declars[!grepl("^//", par_declars)] all_pars_regex <- "(?<= )[^[:blank:]]+(?=;)" all_pars <- get_matches(all_pars_regex, par_declars, perl = TRUE) all_pars <- rename(all_pars, class_old, class_new, fixed = FALSE) all_bounds <- get_matches("<.+>", par_declars, first = TRUE) all_types <- get_matches("^[^[:blank:]]+", par_declars) all_dims <- get_matches( "(?<=\\[)[^\\]]*", par_declars, first = TRUE, perl = TRUE ) # define parameter types and boundaries D$dim <- D$bounds <- "" D$type <- "real" for (i in seq_along(all_pars)) { k <- which(grepl(paste0("^", all_pars[i]), D$par)) D$dim[k] <- all_dims[i] D$bounds[k] <- all_bounds[i] if (grepl("^((simo_)|(theta)|(R2D2_phi))", all_pars[i])) { D$type[k] <- all_types[i] } } # exclude priors which depend on other priors # TODO: enable sampling from these priors as well found_vars <- lapply(D$args, find_vars, dot = FALSE, brackets = FALSE) contains_other_pars <- ulapply(found_vars, function(x) any(x %in% all_pars)) D <- D[!contains_other_pars, ] if (!NROW(D)) return(list()) out <- list() # sample priors in the generated quantities block D$lkj <- grepl("^lkj_corr$", D$dist) D$args <- paste0(ifelse(D$lkj, paste0(D$dim, ","), ""), D$args) D$lkj_index <- ifelse(D$lkj, "[1, 2]", "") D$prior_par <- 
glue("prior_{D$par}") str_add(out$gen_def) <- " // additionally sample draws from priors\n" str_add(out$gen_def) <- cglue( " {D$type} {D$prior_par} = {D$dist}_rng({D$args}){D$lkj_index};\n" ) # sample from truncated priors using rejection sampling D$lb <- stan_extract_bounds(D$bounds, bound = "lower") D$ub <- stan_extract_bounds(D$bounds, bound = "upper") Ibounds <- which(nzchar(D$bounds)) if (length(Ibounds)) { str_add(out$gen_comp) <- " // use rejection sampling for truncated priors\n" for (i in Ibounds) { wl <- if (nzchar(D$lb[i])) glue("{D$prior_par[i]} < {D$lb[i]}") wu <- if (nzchar(D$ub[i])) glue("{D$prior_par[i]} > {D$ub[i]}") prior_while <- paste0(c(wl, wu), collapse = " || ") str_add(out$gen_comp) <- glue( " while ({prior_while}) {{\n", " {D$prior_par[i]} = {D$dist[i]}_rng({D$args[i]}){D$lkj_index[i]};\n", " }}\n" ) } } out } # check if any constant priors are present # @param prior a vector of character strings stan_is_constant_prior <- function(prior) { grepl("^constant\\(", prior) } # extract Stan boundaries expression from a string stan_extract_bounds <- function(x, bound = c("lower", "upper")) { bound <- match.arg(bound) x <- rm_wsp(x) regex <- glue("(?<={bound}=)[^,>]*") get_matches(regex, x, perl = TRUE, first = TRUE) } # choose the right suffix for Stan probability densities stan_lpdf_name <- function(normalize, int = FALSE) { if (normalize) { out <- ifelse(int, "lpmf", "lpdf") } else { out <- ifelse(int, "lupmf", "lupdf") } out } brms/R/make_standata.R0000644000175000017500000002347414111751666014511 0ustar nileshnilesh#' Data for \pkg{brms} Models #' #' Generate data for \pkg{brms} models to be passed to \pkg{Stan} #' #' @inheritParams brm #' @param ... Other arguments for internal use. #' #' @return A named list of objects containing the required data #' to fit a \pkg{brms} model with \pkg{Stan}. #' #' @author Paul-Christian Buerkner \email{paul.buerkner@@gmail.com} #' #' @examples #' sdata1 <- make_standata(rating ~ treat + period + carry + (1|subject), #' data = inhaler, family = "cumulative") #' str(sdata1) #' #' sdata2 <- make_standata(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = "poisson") #' str(sdata2) #' #' @export make_standata <- function(formula, data, family = gaussian(), prior = NULL, autocor = NULL, data2 = NULL, cov_ranef = NULL, sample_prior = "no", stanvars = NULL, threads = NULL, knots = NULL, ...) { if (is.brmsfit(formula)) { stop2("Use 'standata' to extract Stan data from 'brmsfit' objects.") } formula <- validate_formula( formula, data = data, family = family, autocor = autocor, cov_ranef = cov_ranef ) bterms <- brmsterms(formula) data2 <- validate_data2( data2, bterms = bterms, get_data2_autocor(formula), get_data2_cov_ranef(formula) ) data <- validate_data( data, bterms = bterms, knots = knots, data2 = data2 ) prior <- .validate_prior( prior, bterms = bterms, data = data, sample_prior = sample_prior, require_nlpar_prior = FALSE ) stanvars <- validate_stanvars(stanvars) threads <- validate_threads(threads) .make_standata( bterms, data = data, prior = prior, data2 = data2, stanvars = stanvars, threads = threads, ... ) } # internal work function of 'make_stancode' # @param check_response check validity of the response? # @param only_response extract data related to the response only? # @param internal prepare Stan data for use in post-processing methods? # @param basis original Stan data as prepared by 'standata_basis' # @param ... 
currently ignored # @return names list of data passed to Stan .make_standata <- function(bterms, data, prior, stanvars, data2, threads = threading(), check_response = TRUE, only_response = FALSE, internal = FALSE, basis = NULL, ...) { check_response <- as_one_logical(check_response) only_response <- as_one_logical(only_response) internal <- as_one_logical(internal) # order data for use in autocorrelation models data <- order_data(data, bterms = bterms) out <- data_response( bterms, data, check_response = check_response, internal = internal, basis = basis ) if (!only_response) { ranef <- tidy_ranef(bterms, data, old_levels = basis$levels) meef <- tidy_meef(bterms, data, old_levels = basis$levels) index <- tidy_index(bterms, data) c(out) <- data_predictor( bterms, data = data, prior = prior, data2 = data2, ranef = ranef, index = index, basis = basis ) c(out) <- data_gr_global(ranef, data2 = data2) c(out) <- data_Xme(meef, data = data) } out$prior_only <- as.integer(is_prior_only(prior)) if (use_threading(threads)) { out$grainsize <- threads$grainsize if (is.null(out$grainsize)) { out$grainsize <- ceiling(out$N / (2 * threads$threads)) out$grainsize <- max(100, out$grainsize) } } if (is.stanvars(stanvars)) { stanvars <- subset_stanvars(stanvars, block = "data") inv_names <- intersect(names(stanvars), names(out)) if (length(inv_names)) { stop2("Cannot overwrite existing variables: ", collapse_comma(inv_names)) } out[names(stanvars)] <- lapply(stanvars, "[[", "sdata") } if (internal) { # allows to recover the original order of the data attr(out, "old_order") <- attr(data, "old_order") # ensures current grouping levels are known in post-processing ranef_new <- tidy_ranef(bterms, data) meef_new <- tidy_meef(bterms, data) attr(out, "levels") <- get_levels(ranef_new, meef_new) } structure(out, class = c("standata", "list")) } #' Extract data passed to Stan #' #' Extract all data that was used by Stan to fit the model. #' #' @aliases standata.brmsfit #' #' @param object An object of class \code{brmsfit}. #' @param ... More arguments passed to \code{\link{make_standata}} #' and \code{\link{validate_newdata}}. #' @inheritParams prepare_predictions #' #' @return A named list containing the data originally passed to Stan. #' #' @export standata.brmsfit <- function(object, newdata = NULL, re_formula = NULL, newdata2 = NULL, new_objects = NULL, incl_autocor = TRUE, ...) { object <- restructure(object) # allows functions to fall back to old default behavior # which was used when originally fitting the model options(.brmsfit_version = object$version$brms) on.exit(options(.brmsfit_version = NULL)) object <- exclude_terms(object, incl_autocor = incl_autocor) formula <- update_re_terms(object$formula, re_formula) bterms <- brmsterms(formula) newdata2 <- use_alias(newdata2, new_objects) data2 <- current_data2(object, newdata2) data <- current_data( object, newdata, newdata2 = data2, re_formula = re_formula, ... ) stanvars <- add_newdata_stanvars(object$stanvars, data2) basis <- NULL if (!is.null(newdata)) { # 'basis' contains information from original Stan data # required to correctly predict from new data basis <- standata_basis(bterms, data = object$data) } .make_standata( bterms, data = data, prior = object$prior, data2 = data2, stanvars = stanvars, threads = object$threads, basis = basis, ... ) } #' @rdname standata.brmsfit #' @export standata <- function(object, ...) { UseMethod("standata") } # prepare basis data required for correct predictions from new data standata_basis <- function(x, data, ...) 
{ UseMethod("standata_basis") } #' @export standata_basis.default <- function(x, data, ...) { list() } #' @export standata_basis.mvbrmsterms <- function(x, data, ...) { out <- list() for (r in names(x$terms)) { out$resps[[r]] <- standata_basis(x$terms[[r]], data, ...) } out$levels <- get_levels(tidy_meef(x, data), tidy_ranef(x, data)) out } #' @export standata_basis.brmsterms <- function(x, data, ...) { out <- list() data <- subset_data(data, x) for (dp in names(x$dpars)) { out$dpars[[dp]] <- standata_basis(x$dpars[[dp]], data, ...) } for (nlp in names(x$nlpars)) { out$nlpars[[nlp]] <- standata_basis(x$nlpars[[nlp]], data, ...) } # old levels are required to select the right indices for new levels out$levels <- get_levels(tidy_meef(x, data), tidy_ranef(x, data)) if (has_trials(x$family)) { # trials should not be computed based on new data datr <- data_response(x, data, check_response = FALSE, internal = TRUE) # partially match via $ to be independent of the response suffix out$trials <- datr$trials } if (is_binary(x$family) || is_categorical(x$family)) { y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out$resp_levels <- levels(as.factor(y)) } out } #' @export standata_basis.btnl <- function(x, data, ...) { list() } #' @export standata_basis.btl <- function(x, data, ...) { out <- list() out$sm <- standata_basis_sm(x, data, ...) out$gp <- standata_basis_gp(x, data, ...) out$sp <- standata_basis_sp(x, data, ...) out$ac <- standata_basis_ac(x, data, ...) out$bhaz <- standata_basis_bhaz(x, data, ...) out } # prepare basis data related to smooth terms standata_basis_sm <- function(x, data, ...) { stopifnot(is.btl(x)) smterms <- all_terms(x[["sm"]]) out <- named_list(smterms) if (length(smterms)) { knots <- get_knots(data) data <- rm_attr(data, "terms") # the spline penalty has changed in 2.8.7 (#646) diagonal.penalty <- !require_old_default("2.8.7") gam_args <- list( data = data, knots = knots, absorb.cons = TRUE, modCon = 3, diagonal.penalty = diagonal.penalty ) for (i in seq_along(smterms)) { sc_args <- c(list(eval2(smterms[i])), gam_args) out[[i]] <- do_call(smoothCon, sc_args) } } out } # prepare basis data related to gaussian processes standata_basis_gp <- function(x, data, ...) { stopifnot(is.btl(x)) out <- data_gp(x, data, internal = TRUE) out <- out[grepl("^((Xgp)|(dmax)|(cmeans))", names(out))] out } # prepare basis data related to special terms standata_basis_sp <- function(x, data, ...) { stopifnot(is.btl(x)) out <- list() if (length(attr(x$sp, "uni_mo"))) { # do it like data_sp() spef <- tidy_spef(x, data) Xmo <- lapply(unlist(spef$calls_mo), get_mo_values, data = data) out$Jmo <- as.array(ulapply(Xmo, max)) } out } # prepare basis data related to autocorrelation structures standata_basis_ac <- function(x, data, ...) { out <- list() if (has_ac_class(x, "car")) { gr <- get_ac_vars(x, "gr", class = "car") stopifnot(length(gr) <= 1L) if (isTRUE(nzchar(gr))) { out$locations <- levels(factor(get(gr, data))) } else { out$locations <- NA } } out } # prepare basis data for baseline hazards of the cox model standata_basis_bhaz <- function(x, data, ...) 
{ out <- list() if (is_cox(x$family)) { # compute basis matrix of the baseline hazard for the Cox model y <- model.response(model.frame(x$respform, data, na.action = na.pass)) out$basis_matrix <- bhaz_basis_matrix(y, args = x$family$bhaz) } out } brms/R/reloo.R0000644000175000017500000001356614136713710013031 0ustar nileshnilesh#' Compute exact cross-validation for problematic observations #' #' Compute exact cross-validation for problematic observations for which #' approximate leave-one-out cross-validation may return incorrect results. #' Models for problematic observations can be run in parallel using the #' \pkg{future} package. #' #' @inheritParams predict.brmsfit #' @param x An \R object of class \code{brmsfit} or \code{loo} depending #' on the method. #' @param loo An \R object of class \code{loo}. #' @param fit An \R object of class \code{brmsfit}. #' @param k_threshold The threshold at which Pareto \eqn{k} #' estimates are treated as problematic. Defaults to \code{0.7}. #' See \code{\link[loo:pareto-k-diagnostic]{pareto_k_ids}} #' for more details. #' @param check Logical; If \code{TRUE} (the default), some checks #' check are performed if the \code{loo} object was generated #' from the \code{brmsfit} object passed to argument \code{fit}. #' @param ... Further arguments passed to #' \code{\link{update.brmsfit}} and \code{\link{log_lik.brmsfit}}. #' #' @return An object of the class \code{loo}. #' #' @details #' Warnings about Pareto \eqn{k} estimates indicate observations #' for which the approximation to LOO is problematic (this is described in #' detail in Vehtari, Gelman, and Gabry (2017) and the #' \pkg{\link[loo:loo-package]{loo}} package documentation). #' If there are \eqn{J} observations with \eqn{k} estimates above #' \code{k_threshold}, then \code{reloo} will refit the original model #' \eqn{J} times, each time leaving out one of the \eqn{J} #' problematic observations. The pointwise contributions of these observations #' to the total ELPD are then computed directly and substituted for the #' previous estimates from these \eqn{J} observations that are stored in the #' original \code{loo} object. #' #' @seealso \code{\link{loo}}, \code{\link{kfold}} #' #' @examples #' \dontrun{ #' fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient), #' data = epilepsy, family = poisson()) #' # throws warning about some pareto k estimates being too high #' (loo1 <- loo(fit1)) #' (reloo1 <- reloo(fit1, loo = loo1, chains = 1)) #' } #' #' @export reloo.brmsfit <- function(x, loo, k_threshold = 0.7, newdata = NULL, resp = NULL, check = TRUE, ...) { stopifnot(is.loo(loo), is.brmsfit(x)) if (is.brmsfit_multiple(x)) { warn_brmsfit_multiple(x) class(x) <- "brmsfit" } if (is.null(newdata)) { mf <- model.frame(x) } else { mf <- as.data.frame(newdata) } mf <- rm_attr(mf, c("terms", "brmsframe")) if (NROW(mf) != NROW(loo$pointwise)) { stop2("Number of observations in 'loo' and 'x' do not match.") } check <- as_one_logical(check) if (check) { yhash_loo <- attr(loo, "yhash") yhash_fit <- hash_response(x, newdata = newdata) if (!is_equal(yhash_loo, yhash_fit)) { stop2( "Response values used in 'loo' and 'x' do not match. ", "If this is a false positive, please set 'check' to FALSE." ) } } if (is.null(loo$diagnostics$pareto_k)) { stop2("No Pareto k estimates found in the 'loo' object.") } obs <- loo::pareto_k_ids(loo, k_threshold) J <- length(obs) if (J == 0L) { message( "No problematic observations found. ", "Returning the original 'loo' object." 
) return(loo) } # split dots for use in log_lik and update dots <- list(...) ll_arg_names <- arg_names("log_lik") ll_arg_names <- intersect(names(dots), ll_arg_names) ll_args <- dots[ll_arg_names] ll_args$allow_new_levels <- TRUE ll_args$resp <- resp ll_args$combine <- TRUE # cores is used in both log_lik and update up_arg_names <- setdiff(names(dots), setdiff(ll_arg_names, "cores")) up_args <- dots[up_arg_names] up_args$refresh <- 0 .reloo <- function(j) { omitted <- obs[j] mf_omitted <- mf[-omitted, , drop = FALSE] fit_j <- x up_args$object <- fit_j up_args$newdata <- mf_omitted up_args$data2 <- subset_data2(x$data2, -omitted) fit_j <- SW(do_call(update, up_args)) ll_args$object <- fit_j ll_args$newdata <- mf[omitted, , drop = FALSE] ll_args$newdata2 <- subset_data2(x$data2, omitted) return(do_call(log_lik, ll_args)) } lls <- futures <- vector("list", J) message( J, " problematic observation(s) found.", "\nThe model will be refit ", J, " times." ) x <- recompile_model(x) for (j in seq_len(J)) { message( "\nFitting model ", j, " out of ", J, " (leaving out observation ", obs[j], ")" ) futures[[j]] <- future::future( .reloo(j), packages = "brms", seed = TRUE ) } for (j in seq_len(J)) { lls[[j]] <- future::value(futures[[j]]) } # most of the following code is taken from rstanarm:::reloo # compute elpd_{loo,j} for each of the held out observations elpd_loo <- ulapply(lls, log_mean_exp) # compute \hat{lpd}_j for each of the held out observations (using log-lik # matrix from full posterior, not the leave-one-out posteriors) mf_obs <- mf[obs, , drop = FALSE] data2_obs <- subset_data2(x$data2, obs) ll_x <- log_lik(x, newdata = mf_obs, newdata2 = data2_obs) hat_lpd <- apply(ll_x, 2, log_mean_exp) # compute effective number of parameters p_loo <- hat_lpd - elpd_loo # replace parts of the loo object with these computed quantities sel <- c("elpd_loo", "p_loo", "looic") loo$pointwise[obs, sel] <- cbind(elpd_loo, p_loo, -2 * elpd_loo) new_pw <- loo$pointwise[, sel, drop = FALSE] loo$estimates[, 1] <- colSums(new_pw) loo$estimates[, 2] <- sqrt(nrow(loo$pointwise) * apply(new_pw, 2, var)) # what should we do about pareto-k? for now setting them to 0 loo$diagnostics$pareto_k[obs] <- 0 loo } #' @rdname reloo.brmsfit #' @export reloo.loo <- function(x, fit, ...) { reloo(fit, loo = x, ...) } # the generic will eventually be moved to 'loo' #' @rdname reloo.brmsfit #' @export reloo <- function(x, ...) { UseMethod("reloo") } brms/R/autocor.R0000644000175000017500000004631214105230573013355 0ustar nileshnilesh# All functions in this file belong to the deprecated 'cor_brms' class # for specifying autocorrelation structures. They will be removed in brms 3. #' (Deprecated) Correlation structure classes for the \pkg{brms} package #' #' Classes of correlation structures available in the \pkg{brms} package. #' \code{cor_brms} is not a correlation structure itself, #' but the class common to all correlation structures implemented in \pkg{brms}. 
#' #' @name cor_brms #' @aliases cor_brms-class #' #' @section Available correlation structures: #' \describe{ #' \item{cor_arma}{autoregressive-moving average (ARMA) structure, #' with arbitrary orders for the autoregressive and moving #' average components} #' \item{cor_ar}{autoregressive (AR) structure of arbitrary order} #' \item{cor_ma}{moving average (MA) structure of arbitrary order} #' \item{cor_car}{Spatial conditional autoregressive (CAR) structure} #' \item{cor_sar}{Spatial simultaneous autoregressive (SAR) structure} #' \item{cor_fixed}{fixed user-defined covariance structure} #' } #' #' @seealso #' \code{\link{cor_arma}, \link{cor_ar}, \link{cor_ma}, #' \link{cor_car}, \link{cor_sar}, \link{cor_fixed}} #' NULL #' (Deprecated) ARMA(p,q) correlation structure #' #' This function is deprecated. Please see \code{\link{arma}} for the new syntax. #' This functions is a constructor for the \code{cor_arma} class, representing #' an autoregression-moving average correlation structure of order (p, q). #' #' @aliases cor_arma-class #' #' @param formula A one sided formula of the form \code{~ t}, or \code{~ t | g}, #' specifying a time covariate \code{t} and, optionally, a grouping factor #' \code{g}. A covariate for this correlation structure must be integer #' valued. When a grouping factor is present in \code{formula}, the #' correlation structure is assumed to apply only to observations within the #' same grouping level; observations with different grouping levels are #' assumed to be uncorrelated. Defaults to \code{~ 1}, which corresponds to #' using the order of the observations in the data as a covariate, and no #' groups. #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is 0. #' @param q A non-negative integer specifying the moving average (MA) #' order of the ARMA structure. Default is 0. #' @param r No longer supported. #' @param cov A flag indicating whether ARMA effects should be estimated by #' means of residual covariance matrices. This is currently only possible for #' stationary ARMA effects of order 1. If the model family does not have #' natural residuals, latent residuals are added automatically. If #' \code{FALSE} (the default) a regression formulation is used that is #' considerably faster and allows for ARMA effects of order higher than 1 but #' is only available for \code{gaussian} models and some of its #' generalizations. #' #' @return An object of class \code{cor_arma}, representing an #' autoregression-moving-average correlation structure. #' #' @seealso \code{\link{cor_ar}}, \code{\link{cor_ma}} #' #' @examples #' cor_arma(~ visit | patient, p = 2, q = 2) #' #' @export cor_arma <- function(formula = ~1, p = 0, q = 0, r = 0, cov = FALSE) { formula <- as.formula(formula) p <- as_one_numeric(p) q <- as_one_numeric(q) cov <- as_one_logical(cov) if ("r" %in% names(match.call())) { warning2("The ARR structure is no longer supported and ignored.") } if (!(p >= 0 && p == round(p))) { stop2("Autoregressive order must be a non-negative integer.") } if (!(q >= 0 && q == round(q))) { stop2("Moving-average order must be a non-negative integer.") } if (!sum(p, q)) { stop2("At least one of 'p' and 'q' should be greater zero.") } if (cov && (p > 1 || q > 1)) { stop2("Covariance formulation of ARMA structures is ", "only possible for effects of maximal order one.") } x <- nlist(formula, p, q, cov) class(x) <- c("cor_arma", "cor_brms") x } #' (Deprecated) AR(p) correlation structure #' #' This function is deprecated. 
Please see \code{\link{ar}} for the new syntax. #' This function is a constructor for the \code{cor_arma} class, #' allowing for autoregression terms only. #' #' @inheritParams cor_arma #' @param p A non-negative integer specifying the autoregressive (AR) #' order of the ARMA structure. Default is 1. #' #' @return An object of class \code{cor_arma} containing solely autoregression terms. #' #' @details AR refers to autoregressive effects of residuals, which #' is what is typically understood as autoregressive effects. #' However, one may also model autoregressive effects of the response #' variable, which is called ARR in \pkg{brms}. #' #' @seealso \code{\link{cor_arma}} #' #' @examples #' cor_ar(~visit|patient, p = 2) #' #' @export cor_ar <- function(formula = ~1, p = 1, cov = FALSE) { cor_arma(formula = formula, p = p, q = 0, cov = cov) } #' (Deprecated) MA(q) correlation structure #' #' This function is deprecated. Please see \code{\link{ma}} for the new syntax. #' This function is a constructor for the \code{cor_arma} class, #' allowing for moving average terms only. #' #' @inheritParams cor_arma #' @param q A non-negative integer specifying the moving average (MA) #' order of the ARMA structure. Default is 1. #' #' @return An object of class \code{cor_arma} containing solely moving #' average terms. #' #' @seealso \code{\link{cor_arma}} #' #' @examples #' cor_ma(~visit|patient, q = 2) #' #' @export cor_ma <- function(formula = ~1, q = 1, cov = FALSE) { cor_arma(formula = formula, p = 0, q = q, cov = cov) } #' (Defunct) ARR correlation structure #' #' The ARR correlation structure is no longer supported. #' #' @inheritParams cor_arma #' #' @keywords internal #' @export cor_arr <- function(formula = ~1, r = 1) { cor_arma(formula = formula, p = 0, q = 0, r = r) } #' (Deprecated) Compound Symmetry (COSY) Correlation Structure #' #' This function is deprecated. Please see \code{\link{cosy}} for the new syntax. #' This functions is a constructor for the \code{cor_cosy} class, representing #' a compound symmetry structure corresponding to uniform correlation. #' #' @aliases cor_cosy-class #' #' @inheritParams cor_arma #' #' @return An object of class \code{cor_cosy}, representing a compound symmetry #' correlation structure. #' #' @examples #' cor_cosy(~ visit | patient) #' #' @export cor_cosy <- function(formula = ~1) { formula <- as.formula(formula) x <- nlist(formula) class(x) <- c("cor_cosy", "cor_brms") x } #' (Deprecated) Spatial simultaneous autoregressive (SAR) structures #' #' Thse functions are deprecated. Please see \code{\link{sar}} for the new #' syntax. These functions are constructors for the \code{cor_sar} class #' implementing spatial simultaneous autoregressive structures. #' The \code{lagsar} structure implements SAR of the response values: #' \deqn{y = \rho W y + \eta + e} #' The \code{errorsar} structure implements SAR of the residuals: #' \deqn{y = \eta + u, u = \rho W u + e} #' In the above equations, \eqn{\eta} is the predictor term and #' \eqn{e} are independent normally or t-distributed residuals. #' #' @param W An object specifying the spatial weighting matrix. #' Can be either the spatial weight matrix itself or an #' object of class \code{listw} or \code{nb}, from which #' the spatial weighting matrix can be computed. #' @param type Type of the SAR structure. Either \code{"lag"} #' (for SAR of the response values) or \code{"error"} #' (for SAR of the residuals). #' #' @details Currently, only families \code{gaussian} and \code{student} #' support SAR structures. 
#' #' @return An object of class \code{cor_sar} to be used in calls to #' \code{\link{brm}}. #' #' @examples #' \dontrun{ #' data(oldcol, package = "spdep") #' fit1 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, #' autocor = cor_lagsar(COL.nb), #' chains = 2, cores = 2) #' summary(fit1) #' plot(fit1) #' #' fit2 <- brm(CRIME ~ INC + HOVAL, data = COL.OLD, #' autocor = cor_errorsar(COL.nb), #' chains = 2, cores = 2) #' summary(fit2) #' plot(fit2) #' } #' #' @export cor_sar <- function(W, type = c("lag", "error")) { type <- match.arg(type) W_name <- deparse(substitute(W)) W <- validate_sar_matrix(W) structure( nlist(W, W_name, type), class = c("cor_sar", "cor_brms") ) } #' @rdname cor_sar #' @export cor_lagsar <- function(W) { out <- cor_sar(W, type = "lag") out$W_name <- deparse(substitute(W)) out } #' @rdname cor_sar #' @export cor_errorsar <- function(W) { out <- cor_sar(W, type = "error") out$W_name <- deparse(substitute(W)) out } #' (Deprecated) Spatial conditional autoregressive (CAR) structures #' #' These function are deprecated. Please see \code{\link{car}} for the new #' syntax. These functions are constructors for the \code{cor_car} class #' implementing spatial conditional autoregressive structures. #' #' @param W Adjacency matrix of locations. #' All non-zero entries are treated as if the two locations #' are adjacent. If \code{formula} contains a grouping factor, #' the row names of \code{W} have to match the levels #' of the grouping factor. #' @param formula An optional one-sided formula of the form #' \code{~ 1 | g}, where \code{g} is a grouping factor mapping #' observations to spatial locations. If not specified, #' each observation is treated as a separate location. #' It is recommended to always specify a grouping factor #' to allow for handling of new data in post-processing methods. #' @param type Type of the CAR structure. Currently implemented #' are \code{"escar"} (exact sparse CAR), \code{"esicar"} #' (exact sparse intrinsic CAR), \code{"icar"} (intrinsic CAR), #' and \code{"bym2"}. More information is provided in the 'Details' section. #' #' @details The \code{escar} and \code{esicar} types are #' implemented based on the case study of Max Joseph #' (\url{https://github.com/mbjoseph/CARstan}). The \code{icar} and #' \code{bym2} type is implemented based on the case study of Mitzi Morris #' (\url{https://mc-stan.org/users/documentation/case-studies/icar_stan.html}). 
#' #' @examples #' \dontrun{ #' # generate some spatial data #' east <- north <- 1:10 #' Grid <- expand.grid(east, north) #' K <- nrow(Grid) #' #' # set up distance and neighbourhood matrices #' distance <- as.matrix(dist(Grid)) #' W <- array(0, c(K, K)) #' W[distance == 1] <- 1 #' #' # generate the covariates and response data #' x1 <- rnorm(K) #' x2 <- rnorm(K) #' theta <- rnorm(K, sd = 0.05) #' phi <- rmulti_normal( #' 1, mu = rep(0, K), Sigma = 0.4 * exp(-0.1 * distance) #' ) #' eta <- x1 + x2 + phi #' prob <- exp(eta) / (1 + exp(eta)) #' size <- rep(50, K) #' y <- rbinom(n = K, size = size, prob = prob) #' dat <- data.frame(y, size, x1, x2) #' #' # fit a CAR model #' fit <- brm(y | trials(size) ~ x1 + x2, data = dat, #' family = binomial(), autocor = cor_car(W)) #' summary(fit) #' } #' #' @export cor_car <- function(W, formula = ~1, type = "escar") { options <- c("escar", "esicar", "icar", "bym2") type <- match.arg(type, options) W_name <- deparse(substitute(W)) W <- validate_car_matrix(W) formula <- as.formula(formula) if (!is.null(lhs(formula))) { stop2("'formula' should be a one-sided formula.") } if (length(attr(terms(formula), "term.labels")) > 1L) { stop2("'formula' should not contain more than one term.") } structure( nlist(W, W_name, formula, type), class = c("cor_car", "cor_brms") ) } #' @rdname cor_car #' @export cor_icar <- function(W, formula = ~1) { out <- cor_car(W, formula, type = "icar") out$W_name <- deparse(substitute(W)) out } #' (Deprecated) Fixed user-defined covariance matrices #' #' This function is deprecated. Please see \code{\link{fcor}} for the new #' syntax. Define a fixed covariance matrix of the response variable for #' instance to model multivariate effect sizes in meta-analysis. #' #' @aliases cov_fixed #' #' @param V Known covariance matrix of the response variable. #' If a vector is passed, it will be used as diagonal entries #' (variances) and covariances will be set to zero. #' #' @return An object of class \code{cor_fixed}. #' #' @examples #' \dontrun{ #' dat <- data.frame(y = rnorm(3)) #' V <- cbind(c(0.5, 0.3, 0.2), c(0.3, 1, 0.1), c(0.2, 0.1, 0.2)) #' fit <- brm(y~1, data = dat, autocor = cor_fixed(V)) #' } #' #' @export cor_fixed <- function(V) { V_name <- deparse(substitute(V)) if (is.vector(V)) { V <- diag(V) } else { V <- as.matrix(V) } if (!isSymmetric(unname(V))) { stop2("'V' must be symmetric") } structure(nlist(V, V_name), class = c("cor_fixed", "cor_brms")) } #' (Defunct) Basic Bayesian Structural Time Series #' #' The BSTS correlation structure is no longer supported. #' #' @inheritParams cor_arma #' #' @keywords internal #' @export cor_bsts <- function(formula = ~1) { stop2("The BSTS structure is no longer supported.") } #' Check if argument is a correlation structure #' #' Check if argument is one of the correlation structures #' used in \pkg{brms}. #' #' @param x An \R object. #' #' @export is.cor_brms <- function(x) { inherits(x, "cor_brms") } #' @rdname is.cor_brms #' @export is.cor_arma <- function(x) { inherits(x, "cor_arma") } #' @rdname is.cor_brms #' @export is.cor_cosy <- function(x) { inherits(x, "cor_cosy") } #' @rdname is.cor_brms #' @export is.cor_sar <- function(x) { inherits(x, "cor_sar") } #' @rdname is.cor_brms #' @export is.cor_car <- function(x) { inherits(x, "cor_car") } #' @rdname is.cor_brms #' @export is.cor_fixed <- function(x) { inherits(x, "cor_fixed") } #' @export print.cor_empty <- function(x, ...) { cat("empty()\n") } #' @export print.cor_arma <- function(x, ...) 
{ cat(paste0("arma(", formula2str(x$formula), ", ", x$p, ", ", x$q, ")\n")) invisible(x) } #' @export print.cor_cosy <- function(x, ...) { cat(paste0("cosy(", formula2str(x$formula), ")\n")) invisible(x) } #' @export print.cor_sar <- function(x, ...) { cat(paste0("sar(", x$W_name, ", '", x$type, "')\n")) invisible(x) } #' @export print.cor_car <- function(x, ...) { form <- formula2str(x$formula) cat(paste0("car(", x$W_name, ", ", form, ", '", x$type, "')\n")) invisible(x) } #' @export print.cor_fixed <- function(x, ...) { cat("Fixed covariance matrix: \n") print(x$V) invisible(x) } #' @export print.cov_fixed <- function(x, ...) { class(x) <- "cor_fixed" print.cor_fixed(x) } stop_not_cor_brms <- function(x) { if (!(is.null(x) || is.cor_brms(x))) { stop2("Argument 'autocor' must be of class 'cor_brms'.") } TRUE } # empty 'cor_brms' object cor_empty <- function() { structure(list(), class = c("cor_empty", "cor_brms")) } is.cor_empty <- function(x) { inherits(x, "cor_empty") } #' (Deprecated) Extract Autocorrelation Objects #' #' @inheritParams posterior_predict.brmsfit #' @param ... Currently unused. #' #' @return A \code{cor_brms} object or a list of such objects for multivariate #' models. Not supported for models fitted with brms 2.11.1 or higher. #' #' @export autocor.brmsfit <- function(object, resp = NULL, ...) { warning2("Method 'autocor' is deprecated and will be removed in the future.") object <- restructure(object) resp <- validate_resp(resp, object) if (!is.null(resp)) { # multivariate model autocor <- object$autocor[resp] if (length(resp) == 1L) { autocor <- autocor[[1]] } } else { # univariate model autocor <- object$autocor } autocor } #' @rdname autocor.brmsfit #' @export autocor <- function(object, ...) { UseMethod("autocor") } # extract variables for autocorrelation structures # @param autocor object of class 'cor_brms' # @return a list with elements 'time', and 'group' terms_autocor <- function(autocor) { out <- list() formula <- autocor$formula if (is.null(formula)) { formula <- ~1 } if (!is.null(lhs(formula))) { stop2("Autocorrelation formulas must be one-sided.") } formula <- formula2str(formula) time <- as.formula(paste("~", gsub("~|\\|[[:print:]]*", "", formula))) time_vars <- all_vars(time) if (is.cor_car(autocor) && length(time_vars) > 0L) { stop2("The CAR structure should not contain a 'time' variable.") } if (length(time_vars) > 1L) { stop2("Autocorrelation structures may only contain 1 time variable.") } if (length(time_vars)) { out$time <- time_vars } else { out$time <- NA } group <- sub("^\\|*", "", sub("~[^\\|]*", "", formula)) stopif_illegal_group(group) group_vars <- all_vars(group) if (length(group_vars)) { out$group <- paste0(group_vars, collapse = ":") } else { out$group <- NA } out } # transform a 'cor_brms' object into a formula # this ensure compatibility with brms <= 2.11 as_formula_cor_brms <- function(x) { stop_not_cor_brms(x) if (is.cor_empty(x)) { return(NULL) } args <- data2 <- list() pac <- terms_autocor(x) if (is.cor_arma(x)) { fun <- "arma" args$time <- pac$time args$gr <- pac$group args$p <- x$p args$q <- x$q args$cov <- x$cov out <- paste0(names(args), " = ", args, collapse = ", ") out <- paste0("arma(", out, ")") } else if (is.cor_cosy(x)) { fun <- "cosy" args$time <- pac$time args$gr <- pac$group } else if (is.cor_sar(x)) { fun <- "sar" args$M <- make_M_names(x$W_name) args$type <- paste0("'", x$type, "'") data2[[args$M]] <- x$W } else if (is.cor_car(x)) { fun <- "car" args$M <- make_M_names(x$W_name) args$gr <- pac$group args$type <- 
paste0("'", x$type, "'") data2[[args$M]] <- x$W } else if (is.cor_fixed(x)) { fun <- "fcor" args$M <- make_M_names(x$V_name) data2[[args$M]] <- x$V } out <- paste0(names(args), " = ", args, collapse = ", ") out <- paste0(fun, "(", out, ")") out <- str2formula(out) attr(out, "data2") <- data2 class(out) <- c("cor_brms_formula", "formula") out } # ensures covariance matrix inputs are named reasonably make_M_names <- function(x) { out <- make.names(x) if (!length(out)) { # likely unique random name for the matrix argument out <- paste0("M", collapse(sample(0:9, 5, TRUE))) } out } # get data objects from 'autocor' for use in 'data2' # for backwards compatibility with brms <= 2.11 get_data2_autocor <- function(x, ...) { UseMethod("get_data2_autocor") } #' @export get_data2_autocor.brmsformula <- function(x, ...) { attr(attr(x$formula, "autocor"), "data2") } #' @export get_data2_autocor.mvbrmsformula <- function(x, ...) { ulapply(x$forms, get_data2_autocor, recursive = FALSE) } #' @export print.cor_brms_formula <- function(x, ...) { y <- x attr(y, "data2") <- NULL class(y) <- "formula" print(y) invisible(x) } brms/R/brmsfit-helpers.R0000644000175000017500000007414314136563414015021 0ustar nileshnileshcontains_draws <- function(x) { if (!(is.brmsfit(x) && length(x$fit@sim))) { stop2("The model does not contain posterior draws.") } invisible(TRUE) } is_mv <- function(x) { stopifnot(is.brmsfit(x)) is.mvbrmsformula(x$formula) } stopifnot_resp <- function(x, resp = NULL) { if (is_mv(x) && length(resp) != 1L) { stop2("Argument 'resp' must be a single variable name ", "when applying this method to a multivariate model.") } invisible(NULL) } # apply a link function # @param x an array of arbitrary dimension # @param link character string defining the link link <- function(x, link) { switch(link, "identity" = x, "log" = log(x), "logm1" = logm1(x), "log1p" = log1p(x), "inverse" = 1 / x, "sqrt" = sqrt(x), "1/mu^2" = 1 / x^2, "tan_half" = tan(x / 2), "logit" = logit(x), "probit" = qnorm(x), "cauchit" = qcauchy(x), "cloglog" = cloglog(x), "probit_approx" = qnorm(x), "softplus" = log_expm1(x), "squareplus" = (x^2 - 1) / x, stop2("Link '", link, "' not supported.") ) } # apply an inverse link function # @param x an array of arbitrary dimension # @param link a character string defining the link ilink <- function(x, link) { switch(link, "identity" = x, "log" = exp(x), "logm1" = expp1(x), "log1p" = expm1(x), "inverse" = 1 / x, "sqrt" = x^2, "1/mu^2" = 1 / sqrt(x), "tan_half" = 2 * atan(x), "logit" = inv_logit(x), "probit" = pnorm(x), "cauchit" = pcauchy(x), "cloglog" = inv_cloglog(x), "probit_approx" = pnorm(x), "softplus" = log1p_exp(x), "squareplus" = (x + sqrt(x^2 + 4)) / 2, stop2("Link '", link, "' not supported.") ) } # validate integers indicating which draws to subset validate_draw_ids <- function(x, draw_ids = NULL, ndraws = NULL) { ndraws_total <- ndraws(x) if (is.null(draw_ids) && !is.null(ndraws)) { ndraws <- as_one_integer(ndraws) draw_ids <- sample(seq_len(ndraws_total), ndraws) } if (!is.null(draw_ids)) { draw_ids <- as.integer(draw_ids) if (any(draw_ids < 1L) || any(draw_ids > ndraws_total)) { stop2("Some 'draw_ids' indices are out of range.") } } draw_ids } # get correlation names as combinations of variable names # @param names the variable names # @param type character string to be put in front of the returned strings # @param brackets should the correlation names contain brackets # or underscores as seperators? 
# @param sep character string to separate names; only used if !brackets # @return a vector of character strings get_cornames <- function(names, type = "cor", brackets = TRUE, sep = "__") { cornames <- NULL if (length(names) > 1) { for (i in seq_along(names)[-1]) { for (j in seq_len(i - 1)) { if (brackets) { c(cornames) <- paste0(type, "(", names[j], "," , names[i], ")") } else { c(cornames) <- paste0(type, sep, names[j], sep, names[i]) } } } } cornames } # extract names of categorical variables in the model get_cat_vars <- function(x) { stopifnot(is.brmsfit(x)) like_factor <- sapply(model.frame(x), is_like_factor) valid_groups <- c( names(model.frame(x))[like_factor], get_group_vars(x) ) unique(valid_groups[nzchar(valid_groups)]) } # covariance matrices based on correlation and SD draws # @param sd matrix of draws of standard deviations # @param cor matrix of draws of correlations get_cov_matrix <- function(sd, cor = NULL) { sd <- as.matrix(sd) stopifnot(all(sd >= 0)) ndraws <- nrow(sd) size <- ncol(sd) out <- array(diag(1, size), dim = c(size, size, ndraws)) out <- aperm(out, perm = c(3, 1, 2)) for (i in seq_len(size)) { out[, i, i] <- sd[, i]^2 } if (length(cor)) { cor <- as.matrix(cor) stopifnot(nrow(sd) == nrow(cor)) stopifnot(min(cor) >= -1, max(cor) <= 1) stopifnot(ncol(cor) == size * (size - 1) / 2) k <- 0 for (i in seq_len(size)[-1]) { for (j in seq_len(i - 1)) { k = k + 1 out[, j, i] <- out[, i, j] <- cor[, k] * sd[, i] * sd[, j] } } } out } # correlation matrices based on correlation draws # @param cor draws of correlations # @param size optional size of the desired correlation matrix; # ignored is 'cor' is specified # @param ndraws optional number of posterior draws; # ignored is 'cor' is specified get_cor_matrix <- function(cor, size = NULL, ndraws = NULL) { if (length(cor)) { cor <- as.matrix(cor) size <- -1 / 2 + sqrt(1 / 4 + 2 * ncol(cor)) + 1 ndraws <- nrow(cor) } size <- as_one_numeric(size) ndraws <- as_one_numeric(ndraws) stopifnot(is_wholenumber(size) && size > 0) stopifnot(is_wholenumber(ndraws) && ndraws > 0) out <- array(diag(1, size), dim = c(size, size, ndraws)) out <- aperm(out, perm = c(3, 1, 2)) if (length(cor)) { k <- 0 for (i in seq_len(size)[-1]) { for (j in seq_len(i - 1)) { k = k + 1 out[, j, i] <- out[, i, j] <- cor[, k] } } } out } # compute covariance matrices of autocor structures # @param prep a brmsprep object # @param obs observations for which to compute the covariance matrix # @param latent compute covariance matrix for latent residuals? get_cov_matrix_ac <- function(prep, obs = NULL, latent = FALSE) { if (is.null(obs)) { obs <- seq_len(prep$nobs) } nobs <- length(obs) ndraws <- prep$ndraws acef <- prep$ac$acef # prepare correlations if (has_ac_class(acef, "arma")) { ar <- as.numeric(prep$ac$ar) ma <- as.numeric(prep$ac$ma) if (length(ar) && !length(ma)) { cor <- get_cor_matrix_ar1(ar, nobs) } else if (!length(ar) && length(ma)) { cor <- get_cor_matrix_ma1(ma, nobs) } else if (length(ar) && length(ma)) { cor <- get_cor_matrix_arma1(ar, ma, nobs) } else { stop2("Neither 'ar' nor 'ma' were supplied. 
Please report a bug.") } } else if (has_ac_class(acef, "cosy")) { cosy <- as.numeric(prep$ac$cosy) cor <- get_cor_matrix_cosy(cosy, nobs) } else if (has_ac_class(acef, "fcor")) { cor <- get_cor_matrix_fcor(prep$ac$Mfcor, ndraws) } else { cor <- get_cor_matrix_ident(ndraws, nobs) } # prepare known standard errors if (!is.null(prep$data$se)) { se2 <- prep$data$se[obs]^2 se2 <- array(diag(se2, nobs), dim = c(nobs, nobs, ndraws)) se2 <- aperm(se2, perm = c(3, 1, 2)) # make sure not to add 'se' twice prep$data$se <- NULL } else { se2 <- rep(0, nobs) } # prepare residual standard deviations if (latent) { sigma2 <- as.numeric(prep$ac$sderr)^2 } else { sigma <- get_dpar(prep, "sigma", i = obs) if (NCOL(sigma) > 1L) { # sigma varies across observations sigma2 <- array(dim = c(ndraws, nobs, nobs)) for (s in seq_rows(sigma2)) { sigma2[s, , ] <- outer(sigma[s, ], sigma[s, ]) } } else { sigma2 <- as.numeric(sigma)^2 } } sigma2 * cor + se2 } # compute AR1 correlation matrices # @param ar AR1 autocorrelation draws # @param nobs number of rows of the covariance matrix # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_ar1 <- function(ar, nobs) { out <- array(0, dim = c(NROW(ar), nobs, nobs)) fac <- 1 / (1 - ar^2) pow_ar <- as.list(rep(1, nobs + 1)) for (i in seq_len(nobs)) { pow_ar[[i + 1]] <- ar^i out[, i, i] <- fac for (j in seq_len(i - 1)) { out[, i, j] <- fac * pow_ar[[i - j + 1]] out[, j, i] <- out[, i, j] } } out } # compute MA1 correlation matrices # @param ma MA1 autocorrelation draws # @param nobs number of rows of the covariance matrix # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_ma1 <- function(ma, nobs) { out <- array(0, dim = c(NROW(ma), nobs, nobs)) gamma0 <- 1 + ma^2 for (i in seq_len(nobs)) { out[, i, i] <- gamma0 if (i > 1) { out[, i, i - 1] <- ma } if (i < nobs) { out[, i, i + 1] <- ma } } out } # compute ARMA1 correlation matrices # @param ar AR1 autocorrelation draws # @param ma MA1 autocorrelation draws # @param nobs number of rows of the covariance matrix # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_arma1 <- function(ar, ma, nobs) { out <- array(0, dim = c(NROW(ar), nobs, nobs)) fac <- 1 / (1 - ar^2) gamma0 <- 1 + ma^2 + 2 * ar * ma gamma <- as.list(rep(NA, nobs)) gamma[[1]] <- (1 + ar * ma) * (ar + ma) for (i in seq_len(nobs)) { out[, i, i] <- fac * gamma0 gamma[[i]] <- gamma[[1]] * ar^(i - 1) for (j in seq_len(i - 1)) { out[, i, j] <- fac * gamma[[i - j]] out[, j, i] <- out[, i, j] } } out } # compute compound symmetry correlation matrices # @param cosy compund symmetry correlation draws # @param nobs number of rows of the covariance matrix # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_cosy <- function(cosy, nobs) { out <- array(0, dim = c(NROW(cosy), nobs, nobs)) for (i in seq_len(nobs)) { out[, i, i] <- 1 for (j in seq_len(i - 1)) { out[, i, j] <- cosy out[, j, i] <- out[, i, j] } } out } # prepare a fixed correlation matrix # @param Mfcor correlation matrix to be prepared # @param ndraws number of posterior draws # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_fcor <- function(Mfcor, ndraws) { out <- array(Mfcor, dim = c(dim(Mfcor), ndraws)) aperm(out, c(3, 1, 2)) } # compute an identity correlation matrix # @param ndraws number of posterior draws # @param nobs number of rows of the covariance matrix # @return a numeric 'ndraws' x 'nobs' x 'nobs' array get_cor_matrix_ident <- function(ndraws, nobs) { out <- array(0, dim = c(ndraws, nobs, nobs)) for (i in seq_len(nobs)) { 
out[, i, i] <- 1 } out } #' Draws of a Distributional Parameter #' #' Get draws of a distributional parameter from a \code{brmsprep} or #' \code{mvbrmsprep} object. This function is primarily useful when developing #' custom families or packages depending on \pkg{brms}. #' This function lets callers easily handle both the case when the #' distributional parameter is predicted directly, via a (non-)linear #' predictor or fixed to a constant. See the vignette #' \code{vignette("brms_customfamilies")} for an example use case. #' #' @param prep A 'brmsprep' or 'mvbrmsprep' object created by #' \code{\link[brms:prepare_predictions.brmsfit]{prepare_predictions}}. #' @param dpar Name of the distributional parameter. #' @param i The observation numbers for which predictions shall be extracted. #' If \code{NULL} (the default), all observation will be extracted. #' Ignored if \code{dpar} is not predicted. #' @param ilink Should the inverse link function be applied? #' If \code{NULL} (the default), the value is chosen internally. #' In particular, \code{ilink} is \code{TRUE} by default for custom #' families. #' @return #' If the parameter is predicted and \code{i} is \code{NULL} or #' \code{length(i) > 1}, an \code{S x N} matrix. If the parameter it not #' predicted or \code{length(i) == 1}, a vector of length \code{S}. Here #' \code{S} is the number of draws and \code{N} is the number of #' observations or length of \code{i} if specified. #' #' @examples #' \dontrun{ #' posterior_predict_my_dist <- function(i, prep, ...) { #' mu <- brms::get_dpar(prep, "mu", i = i) #' mypar <- brms::get_dpar(prep, "mypar", i = i) #' my_rng(mu, mypar) #' } #' } #' #' @export get_dpar <- function(prep, dpar, i = NULL, ilink = NULL) { stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep)) dpar <- as_one_character(dpar) x <- prep$dpars[[dpar]] stopifnot(!is.null(x)) if (is.list(x)) { # compute draws of a predicted parameter out <- predictor(x, i = i, fprep = prep) if (is.null(ilink)) { ilink <- apply_dpar_ilink(dpar, family = prep$family) } else { ilink <- as_one_logical(ilink) } if (ilink) { out <- ilink(out, x$family$link) } if (length(i) == 1L) { out <- slice_col(out, 1) } } else if (!is.null(i) && !is.null(dim(x))) { out <- slice_col(x, i) } else { out <- x } out } # get draws of a non-linear parameter # @param x object to extract posterior draws from # @param nlpar name of the non-linear parameter # @param i the current observation number # @return # If i is NULL or length(i) > 1: an S x N matrix # If length(i) == 1: a vector of length S get_nlpar <- function(prep, nlpar, i = NULL) { stopifnot(is.brmsprep(prep) || is.mvbrmsprep(prep)) x <- prep$nlpars[[nlpar]] stopifnot(!is.null(x)) if (is.list(x)) { # compute draws of a predicted parameter out <- predictor(x, i = i, fprep = prep) if (length(i) == 1L) { out <- slice_col(out, 1) } } else if (!is.null(i) && !is.null(dim(x))) { out <- slice_col(x, i) } else { out <- x } out } # get the mixing proportions of mixture models get_theta <- function(prep, i = NULL) { stopifnot(is.brmsprep(prep)) if ("theta" %in% names(prep$dpars)) { # theta was not predicted; no need to call get_dpar theta <- prep$dpars$theta } else { # theta was predicted; apply softmax mix_family <- prep$family families <- family_names(mix_family) theta <- vector("list", length(families)) for (j in seq_along(families)) { prep$family <- mix_family$mix[[j]] theta[[j]] <- as.matrix(get_dpar(prep, paste0("theta", j), i = i)) } theta <- abind(theta, along = 3) for (n in seq_len(dim(theta)[2])) { theta[, n, ] <- 
softmax(theta[, n, ]) } if (length(i) == 1L) { dim(theta) <- dim(theta)[c(1, 3)] } } theta } # get posterior draws of multivariate mean vectors # only used in multivariate models with 'rescor' get_Mu <- function(prep, i = NULL) { stopifnot(is.mvbrmsprep(prep)) Mu <- prep$mvpars$Mu if (is.null(Mu)) { Mu <- lapply(prep$resps, get_dpar, "mu", i = i) if (length(i) == 1L) { Mu <- do_call(cbind, Mu) } else { # keep correct dimension even if data has only 1 row Mu <- lapply(Mu, as.matrix) Mu <- abind::abind(Mu, along = 3) } } else { stopifnot(!is.null(i)) Mu <- slice_col(Mu, i) } Mu } # get posterior draws of residual covariance matrices # only used in multivariate models with 'rescor' get_Sigma <- function(prep, i = NULL) { stopifnot(is.mvbrmsprep(prep)) Sigma <- prep$mvpars$Sigma if (is.null(Sigma)) { stopifnot(!is.null(prep$mvpars$rescor)) sigma <- named_list(names(prep$resps)) for (j in seq_along(sigma)) { sigma[[j]] <- get_dpar(prep$resps[[j]], "sigma", i = i) sigma[[j]] <- add_sigma_se(sigma[[j]], prep$resps[[j]], i = i) } is_matrix <- ulapply(sigma, is.matrix) if (!any(is_matrix)) { # happens if length(i) == 1 or if no sigma was predicted sigma <- do_call(cbind, sigma) Sigma <- get_cov_matrix(sigma, prep$mvpars$rescor) } else { for (j in seq_along(sigma)) { # bring all sigmas to the same dimension if (!is_matrix[j]) { sigma[[j]] <- array(sigma[[j]], dim = dim_mu(prep)) } } nsigma <- length(sigma) sigma <- abind(sigma, along = 3) Sigma <- array(dim = c(dim_mu(prep), nsigma, nsigma)) for (n in seq_len(ncol(Sigma))) { Sigma[, n, , ] <- get_cov_matrix(sigma[, n, ], prep$mvpars$rescor) } } } else { stopifnot(!is.null(i)) ldim <- length(dim(Sigma)) stopifnot(ldim %in% 3:4) if (ldim == 4L) { Sigma <- slice_col(Sigma, i) } } Sigma } # extract user-defined standard errors get_se <- function(prep, i = NULL) { stopifnot(is.brmsprep(prep)) se <- as.vector(prep$data[["se"]]) if (!is.null(se)) { if (!is.null(i)) { se <- se[i] } if (length(se) > 1L) { dim <- c(prep$ndraws, length(se)) se <- data2draws(se, dim = dim) } } else { se <- 0 } se } # add user defined standard errors to 'sigma' # @param sigma draws of the 'sigma' parameter add_sigma_se <- function(sigma, prep, i = NULL) { if ("se" %in% names(prep$data)) { se <- get_se(prep, i = i) sigma <- sqrt(se^2 + sigma^2) } sigma } # extract user-defined rate denominators get_rate_denom <- function(prep, i = NULL) { stopifnot(is.brmsprep(prep)) denom <- as.vector(prep$data[["denom"]]) if (!is.null(denom)) { if (!is.null(i)) { denom <- denom[i] } if (length(denom) > 1L) { dim <- c(prep$ndraws, length(denom)) denom <- data2draws(denom, dim = dim) } } else { denom <- 1 } denom } # multiply a parameter with the 'rate' denominator # @param dpar draws of the distributional parameter multiply_dpar_rate_denom <- function(dpar, prep, i = NULL) { if ("denom" %in% names(prep$data)) { denom <- get_rate_denom(prep, i = i) dpar <- dpar * denom } dpar } # return draws of ordinal thresholds for observation i # @param prep a bprepl or bprepnl object # @param i observation number subset_thres <- function(prep, i) { thres <- prep$thres$thres Jthres <- prep$thres$Jthres if (!is.null(Jthres)) { thres <- thres[, Jthres[i, 1]:Jthres[i, 2], drop = FALSE] } thres } # helper function of 'get_dpar' to decide if # the link function should be applied direclty apply_dpar_ilink <- function(dpar, family) { !(has_joint_link(family) && dpar_class(dpar, family) == "mu") } # insert zeros for the predictor term of the reference category # in categorical-like models using the softmax response 
function insert_refcat <- function(eta, family) { stopifnot(is.array(eta), is.brmsfamily(family)) if (!conv_cats_dpars(family) || isNA(family$refcat)) { return(eta) } # need to add zeros for the reference category ndim <- length(dim(eta)) dim_noncat <- dim(eta)[-ndim] zeros_arr <- array(0, dim = c(dim_noncat, 1)) if (is.null(family$refcat) || is.null(family$cats)) { # no information on the categories provided: # use the first category as the reference return(abind::abind(zeros_arr, eta)) } ncat <- length(family$cats) stopifnot(identical(dim(eta)[ndim], ncat - 1L)) if (is.null(dimnames(eta)[[ndim]])) { dimnames(eta)[[ndim]] <- paste0("mu", setdiff(family$cats, family$refcat)) } dimnames(zeros_arr)[[ndim]] <- paste0("mu", family$refcat) iref <- match(family$refcat, family$cats) before <- seq_len(iref - 1) after <- setdiff(seq_dim(eta, ndim), before) abind::abind( slice(eta, ndim, before, drop = FALSE), zeros_arr, slice(eta, ndim, after, drop = FALSE) ) } # validate the 'resp' argument of 'predict' and related methods # @param resp response names to be validated # @param x valid response names or brmsfit object to extract names from # @param multiple allow multiple response variables? # @return names of validated response variables validate_resp <- function(resp, x, multiple = TRUE) { if (is.brmsfit(x)) { x <- brmsterms(x$formula)$responses } x <- as.character(x) if (!length(x)) { # resp is unused in univariate models return(NULL) } if (length(resp)) { resp <- as.character(resp) if (!all(resp %in% x)) { stop2("Invalid argument 'resp'. Valid response ", "variables are: ", collapse_comma(x)) } if (!multiple) { resp <- as_one_character(resp) } } else { resp <- x } resp } # split '...' into a list of model objects and other arguments # takes its argument names from parent.frame() # @param .... objects to split into model and non-model objects # @param x object treated in the same way as '...'. Adding it is # necessary for substitute() to catch the name of the first # argument passed to S3 methods. # @param model_names optional names of the model objects # @param other: allow non-model arguments in '...'? # @return # A list of arguments. All brmsfit objects are stored # as a list in element 'models' unless 'other' is FALSE. # In the latter case just returns a list of models split_dots <- function(x, ..., model_names = NULL, other = TRUE) { other <- as_one_logical(other) dots <- list(x, ...) names <- substitute(list(x, ...), env = parent.frame())[-1] names <- ulapply(names, deparse_combine) if (length(names)) { if (!length(names(dots))) { names(dots) <- names } else { has_no_name <- !nzchar(names(dots)) names(dots)[has_no_name] <- names[has_no_name] } } is_brmsfit <- unlist(lapply(dots, is.brmsfit)) models <- dots[is_brmsfit] models <- validate_models(models, model_names, names(models)) out <- dots[!is_brmsfit] if (other) { out$models <- models } else { if (length(out)) { stop2("Only model objects can be passed to '...' for this method.") } out <- models } out } # reorder observations to be in the initial user-defined order # currently only relevant for autocorrelation models # @param eta 'ndraws' x 'nobs' matrix or array # @param old_order optional vector to retrieve the initial data order # @param sort keep the new order as defined by the time-series? 
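#   (if TRUE, or if 'old_order' is NULL, 'eta' is returned as is; otherwise
#   its columns are permuted back into the user's original observation order)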
# @return the 'eta' matrix with possibly reordered columns reorder_obs <- function(eta, old_order = NULL, sort = FALSE) { stopifnot(length(dim(eta)) %in% c(2L, 3L)) if (is.null(old_order) || sort) { return(eta) } stopifnot(length(old_order) == NCOL(eta)) p(eta, old_order, row = FALSE) } # update .MISC environment of the stanfit object # allows to call log_prob and other C++ using methods # on objects not created in the current R session # or objects created via another backend update_misc_env <- function(x, only_windows = FALSE) { stopifnot(is.brmsfit(x)) only_windows <- as_one_logical(only_windows) if (!has_rstan_model(x)) { x <- add_rstan_model(x) } else if (os_is_windows() || !only_windows) { # TODO: detect when updating .MISC is not required # TODO: find a more efficient way to update .MISC old_backend <- x$backend x$backend <- "rstan" x$fit@.MISC <- suppressMessages(brm(fit = x, chains = 0))$fit@.MISC x$backend <- old_backend } x } #' Add compiled \pkg{rstan} models to \code{brmsfit} objects #' #' Compile a \code{\link[rstan:stanmodel-class]{stanmodel}} and add #' it to a \code{brmsfit} object. This enables some advanced functionality #' of \pkg{rstan}, most notably \code{\link[rstan:log_prob]{log_prob}} #' and friends, to be used with brms models fitted with other Stan backends. #' #' @param x A \code{brmsfit} object to be updated. #' @param overwrite Logical. If \code{TRUE}, overwrite any existing #' \code{\link[rstan:stanmodel-class]{stanmodel}}. Defaults to \code{FALSE}. #' #' @return A (possibly updated) \code{brmsfit} object. #' #' @export add_rstan_model <- function(x, overwrite = FALSE) { stopifnot(is.brmsfit(x)) overwrite <- as_one_logical(overwrite) if (!has_rstan_model(x) || overwrite) { message("Recompiling the model with 'rstan'") # threading is not yet supported by rstan and needs to be deactivated stanfit <- suppressMessages(rstan::stan( model_code = stancode(x, threads = threading()), data = standata(x), chains = 0 )) x$fit@stanmodel <- stanfit@stanmodel x$fit@.MISC <- stanfit@.MISC message("Recompilation done") } x } # does the model have a non-empty rstan 'stanmodel' # that can be used for 'log_prob' and friends? has_rstan_model <- function(x) { stopifnot(is.brmsfit(x)) isTRUE(nzchar(x$fit@stanmodel@model_cpp$model_cppname)) && length(ls(pos = x$fit@.MISC)) > 0 } # extract argument names of a post-processing method arg_names <- function(method) { opts <- c("posterior_predict", "posterior_epred", "log_lik") method <- match.arg(method, opts) out <- names(formals(paste0(method, ".brmsfit"))) c(out) <- names(formals(prepare_predictions.brmsfit)) c(out) <- names(formals(validate_newdata)) out <- unique(out) out <- setdiff(out, c("object", "x", "...")) out } # validate 'cores' argument for use in post-processing functions validate_cores_post_processing <- function(cores) { if (is.null(cores)) { if (os_is_windows()) { # multi cores often leads to a slowdown on windows # in post-processing functions as discussed in #1129 cores <- 1L } else { cores <- getOption("mc.cores", 1L) } } cores <- as_one_integer(cores) if (cores < 1L) { cores <- 1L } cores } #' Check if cached fit can be used. #' #' Checks whether a given cached fit can be used without refitting when #' \code{file_refit = "on_change"} is used. #' This function is internal and exposed only to facilitate debugging problems #' with cached fits. The function may change or be removed in future versions #' and scripts should not use it. #' #' @param fit Old \code{brmsfit} object (e.g., loaded from file). 
#' @param sdata New Stan data (result of a call to \code{\link{make_standata}}). #' Pass \code{NULL} to avoid this data check. #' @param scode New Stan code (result of a call to \code{\link{make_stancode}}). #' Pass \code{NULL} to avoid this code check. #' @param data New data to check consistency of factor level names. #' Pass \code{NULL} to avoid this data check. #' @param algorithm New algorithm. Pass \code{NULL} to avoid algorithm check. #' @param silent Logical. If \code{TRUE}, no messages will be given. #' @param verbose Logical. If \code{TRUE} detailed report of the differences #' is printed to the console. #' @return A boolean indicating whether a refit is needed. #' #' @details #' Use with \code{verbose = TRUE} to get additional info on how the stored #' fit differs from the given data and code. #' #' @export #' @keywords internal brmsfit_needs_refit <- function(fit, sdata = NULL, scode = NULL, data = NULL, algorithm = NULL, silent = FALSE, verbose = FALSE) { stopifnot(is.brmsfit(fit)) silent <- as_one_logical(silent) verbose <- as_one_logical(verbose) if (!is.null(scode)) { scode <- as_one_character(scode) cached_scode <- stancode(fit) } if (!is.null(sdata)) { stopifnot(is.list(sdata)) cached_sdata <- standata(fit) } if (!is.null(data)) { stopifnot(is.data.frame(data)) cached_data <- fit$data } if (!is.null(algorithm)) { algorithm <- as_one_character(algorithm) stopifnot(!is.null(fit$algorithm)) } refit <- FALSE if (!is.null(scode)) { if (normalize_stancode(scode) != normalize_stancode(cached_scode)) { if (!silent) { message("Stan code has changed beyond whitespace/comments.") if (verbose) { require_package("diffobj") print(diffobj::diffChr(scode, cached_scode, format = "ansi8")) } } refit <- TRUE } } if (!is.null(sdata)) { sdata_equality <- all.equal(sdata, cached_sdata, check.attributes = FALSE) if (!isTRUE(sdata_equality)) { if (!silent) { message("The processed data for Stan has changed.") if (verbose) { print(sdata_equality) } } refit <- TRUE } } if (!is.null(data)) { # check consistency of factor names # as they are only stored as attributes in sdata (#1128) factor_level_message <- FALSE for (var in names(cached_data)) { if (is_like_factor(cached_data[[var]])) { cached_levels <- levels(factor(cached_data[[var]])) new_levels <- levels(factor(data[[var]])) if (!is_equal(cached_levels, new_levels)) { if (!silent) { factor_level_message <- TRUE if (verbose) { cat(paste0( "Names of factor levels have changed for variable '", var, "' ", "with cached levels (", collapse_comma(cached_levels), ") ", "but new levels (", collapse_comma(new_levels), ").\n" )) } } refit <- TRUE if (!verbose) { # no need to check all variables if we trigger a refit anyway break } } } } if (factor_level_message) { message("Names of factor levels have changed.") } } if (!is.null(algorithm)) { if (algorithm != fit$algorithm) { if (!silent) { message("Algorithm has changed from '", fit$algorithm, "' to '", algorithm, "'.\n") } refit <- TRUE } } refit } # read a brmsfit object from a file # @param file path to an rds file # @return a brmsfit object or NULL read_brmsfit <- function(file) { file <- check_brmsfit_file(file) dir <- dirname(file) if (!dir.exists(dir)) { stop2( "The directory '", dir, "' does not exist. Please choose an ", "existing directory where the model can be saved after fitting." 
) } x <- suppressWarnings(try(readRDS(file), silent = TRUE)) if (!is(x, "try-error")) { if (!is.brmsfit(x)) { stop2("Object loaded via 'file' is not of class 'brmsfit'.") } x$file <- file } else { x <- NULL } x } # write a brmsfit object to a file # @param x a brmsfit object # @param file path to an rds file # @return NULL write_brmsfit <- function(x, file) { stopifnot(is.brmsfit(x)) file <- check_brmsfit_file(file) x$file <- file saveRDS(x, file = file) invisible(x) } # check validity of file name to store a brmsfit object in check_brmsfit_file <- function(file) { file <- as_one_character(file) file_ending <- tolower(get_matches("\\.[^\\.]+$", file)) if (!isTRUE(file_ending == ".rds")) { file <- paste0(file, ".rds") } file } # check if a function requires an old default setting # only used to ensure backwards compatibility # @param version brms version in which the change to the default was made # @return TRUE or FALSE require_old_default <- function(version) { version <- as.package_version(version) brmsfit_version <- getOption(".brmsfit_version") isTRUE(brmsfit_version < version) } # add dummy draws to a brmsfit object for use in unit tests # @param x a brmsfit object # @param newpar name of the new parameter to add # @param dim dimension of the new parameter # @param dist name of the distribution from which to sample # @param ... further arguments passed to r # @return a brmsfit object including dummy draws of the new parameter add_dummy_draws <- function(x, newpar, dim = numeric(0), dist = "norm", ...) { stopifnot(is.brmsfit(x)) stopifnot(identical(dim, numeric(0))) newpar <- as_one_character(newpar) for (i in seq_along(x$fit@sim$samples)) { x$fit@sim$samples[[i]][[newpar]] <- do_call(paste0("r", dist), list(x$fit@sim$iter, ...)) } x$fit@sim$fnames_oi <- c(x$fit@sim$fnames_oi, newpar) x$fit@sim$dims_oi[[newpar]] <- dim x$fit@sim$pars_oi <- names(x$fit@sim$dims_oi) x } brms/inst/0000755000175000017500000000000014146747050012334 5ustar nileshnileshbrms/inst/CITATION0000644000175000017500000000330314146733671013474 0ustar nileshnileshbibentry( bibtype = "Article", title = "{brms}: An {R} Package for {Bayesian} Multilevel Models Using {Stan}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "Journal of Statistical Software", year = "2017", volume = "80", number = "1", pages = "1--28", doi = "10.18637/jss.v080.i01", header = "To cite brms in publications use:", textVersion = paste( "Paul-Christian Bürkner (2017).", "brms: An R Package for Bayesian Multilevel Models Using Stan.", "Journal of Statistical Software, 80(1), 1-28.", "doi:10.18637/jss.v080.i01" ), encoding = "UTF-8" ) bibentry( bibtype = "Article", title = "Advanced {Bayesian} Multilevel Modeling with the {R} Package {brms}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "The R Journal", year = "2018", volume = "10", number = "1", pages = "395--411", doi = "10.32614/RJ-2018-017", textVersion = paste( "Paul-Christian Bürkner (2018).", "Advanced Bayesian Multilevel Modeling with the R Package brms.", "The R Journal, 10(1), 395-411.", "doi:10.32614/RJ-2018-017" ), encoding = "UTF-8" ) bibentry( bibtype = "Article", title = "Bayesian Item Response Modeling in {R} with {brms} and {Stan}", author = person(given = "Paul-Christian", family = "Bürkner"), journal = "Journal of Statistical Software", year = "2021", volume = "100", number = "5", pages = "1--54", doi = "10.18637/jss.v100.i05", textVersion = paste( "Paul-Christian Bürkner (2021).", "Bayesian Item Response Modeling in R with 
brms and Stan.", "Journal of Statistical Software, 100(5), 1-54.", "doi:10.18637/jss.v100.i05" ), encoding = "UTF-8" ) brms/inst/chunks/0000755000175000017500000000000014111751667013630 5ustar nileshnileshbrms/inst/chunks/fun_scale_r_cor_cov.stan0000644000175000017500000000263714105230573020510 0ustar nileshnilesh /* compute correlated group-level effects * in the presence of a within-group covariance matrix * Args: * z: matrix of unscaled group-level effects * SD: vector of standard deviation parameters * L: cholesky factor correlation matrix * Lcov: cholesky factor of within-group correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_cov(matrix z, vector SD, matrix L, matrix Lcov) { vector[num_elements(z)] z_flat = to_vector(z); vector[num_elements(z)] r = rep_vector(0, num_elements(z)); matrix[rows(L), cols(L)] LC = diag_pre_multiply(SD, L); int rows_z = rows(z); int rows_L = rows(L); // kronecker product of cholesky factors times a vector for (icov in 1:rows(Lcov)) { for (jcov in 1:icov) { if (Lcov[icov, jcov] > 1e-10) { // avoid calculating products between unrelated individuals for (i in 1:rows_L) { for (j in 1:i) { // incremented element of the output vector int k = (rows_L * (icov - 1)) + i; // applied element of the input vector int l = (rows_L * (jcov - 1)) + j; r[k] = r[k] + Lcov[icov, jcov] * LC[i, j] * z_flat[l]; } } } } } // r is returned in another dimension order than z return to_matrix(r, cols(z), rows(z), 0); } brms/inst/chunks/fun_scale_time_err.stan0000644000175000017500000000145614060727547020355 0ustar nileshnilesh /* scale and correlate time-series residuals * Args: * zerr: standardized and independent residuals * sderr: standard deviation of the residuals * chol_cor: cholesky factor of the correlation matrix * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * vector of scaled and correlated residuals */ vector scale_time_err(vector zerr, real sderr, matrix chol_cor, int[] nobs, int[] begin, int[] end) { vector[rows(zerr)] err; for (i in 1:size(nobs)) { err[begin[i]:end[i]] = sderr * chol_cor[1:nobs[i], 1:nobs[i]] * zerr[begin[i]:end[i]]; } return err; } brms/inst/chunks/fun_cholesky_cor_ar1.stan0000644000175000017500000000117614105230573020612 0ustar nileshnilesh /* compute the cholesky factor of an AR1 correlation matrix * Args: * ar: AR1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows matrix */ matrix cholesky_cor_ar1(real ar, int nrows) { matrix[nrows, nrows] mat; vector[nrows - 1] gamma; mat = diag_matrix(rep_vector(1, nrows)); for (i in 2:nrows) { gamma[i - 1] = pow(ar, i - 1); for (j in 1:(i - 1)) { mat[i, j] = gamma[i - j]; mat[j, i] = gamma[i - j]; } } return cholesky_decompose(mat ./ (1 - ar^2)); } brms/inst/chunks/fun_multinomial_logit.stan0000644000175000017500000000050514105230573021106 0ustar nileshnilesh /* multinomial-logit log-PMF * Args: * y: array of integer response values * mu: vector of category logit probabilities * Returns: * a scalar to be added to the log posterior */ real multinomial_logit2_lpmf(int[] y, vector mu) { return multinomial_lpmf(y | softmax(mu)); } brms/inst/chunks/fun_logm1.stan0000644000175000017500000000060113202254050016363 0ustar nileshnilesh /* compute the logm1 link * Args: * p: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real logm1(real y) { return log(y - 1); } /* compute the inverse of the logm1 link * Args: * y: a scalar in (-Inf, 
Inf) * Returns: * a positive scalar */ real expp1(real y) { return exp(y) + 1; } brms/inst/chunks/fun_dirichlet_logit.stan0000644000175000017500000000055613625764732020550 0ustar nileshnilesh /* dirichlet-logit log-PDF * Args: * y: vector of real response values * mu: vector of category logit probabilities * phi: precision parameter * Returns: * a scalar to be added to the log posterior */ real dirichlet_logit_lpdf(vector y, vector mu, real phi) { return dirichlet_lpdf(y | softmax(mu) * phi); } brms/inst/chunks/fun_cholesky_cor_ma1.stan0000644000175000017500000000115713625764732020623 0ustar nileshnilesh /* compute the cholesky factor of a MA1 correlation matrix * Args: * ma: MA1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows MA1 covariance matrix */ matrix cholesky_cor_ma1(real ma, int nrows) { matrix[nrows, nrows] mat; mat = diag_matrix(rep_vector(1 + ma^2, nrows)); if (nrows > 1) { mat[1, 2] = ma; for (i in 2:(nrows - 1)) { mat[i, i - 1] = ma; mat[i, i + 1] = ma; } mat[nrows, nrows - 1] = ma; } return cholesky_decompose(mat); } brms/inst/chunks/fun_zero_inflated_binomial.stan0000644000175000017500000000715713277405242022075 0ustar nileshnilesh /* zero-inflated binomial log-PDF of a single response * Args: * y: the response value * trials: number of trials of the binomial part * theta: probability parameter of the binomial part * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_lpmf(int y, int trials, real theta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + binomial_lpmf(0 | trials, theta)); } else { return bernoulli_lpmf(0 | zi) + binomial_lpmf(y | trials, theta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * trials: number of trials of the binomial part * theta: probability parameter of the binomial part * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_logit_lpmf(int y, int trials, real theta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + binomial_lpmf(0 | trials, theta)); } else { return bernoulli_logit_lpmf(0 | zi) + binomial_lpmf(y | trials, theta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the binomial part * Args: * y: the response value * trials: number of trials of the binomial part * eta: linear predictor for binomial part * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_blogit_lpmf(int y, int trials, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + binomial_logit_lpmf(0 | trials, eta)); } else { return bernoulli_lpmf(0 | zi) + binomial_logit_lpmf(y | trials, eta); } } /* zero-inflated binomial log-PDF of a single response * logit parameterization of the binomial part * logit parameterization of the zero-inflation part * Args: * y: the response value * trials: number of trials of the binomial part * eta: linear predictor for binomial part * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_binomial_blogit_logit_lpmf(int y, int trials, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | 
zi) + binomial_logit_lpmf(0 | trials, eta)); } else { return bernoulli_logit_lpmf(0 | zi) + binomial_logit_lpmf(y | trials, eta); } } // zero-inflated binomial log-CCDF and log-CDF functions real zero_inflated_binomial_lccdf(int y, int trials, real theta, real zi) { return bernoulli_lpmf(0 | zi) + binomial_lccdf(y | trials, theta); } real zero_inflated_binomial_lcdf(int y, int trials, real theta, real zi) { return log1m_exp(zero_inflated_binomial_lccdf(y | trials, theta, zi)); } brms/inst/chunks/fun_zero_one_inflated_beta.stan0000644000175000017500000000150013277406620022042 0ustar nileshnilesh /* zero-one-inflated beta log-PDF of a single response * Args: * y: response value * mu: mean parameter of the beta part * phi: precision parameter of the beta part * zoi: zero-one-inflation probability * coi: conditional one-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_one_inflated_beta_lpdf(real y, real mu, real phi, real zoi, real coi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_lpmf(1 | zoi) + bernoulli_lpmf(0 | coi); } else if (y == 1) { return bernoulli_lpmf(1 | zoi) + bernoulli_lpmf(1 | coi); } else { return bernoulli_lpmf(0 | zoi) + beta_lpdf(y | shape[1], shape[2]); } } brms/inst/chunks/fun_scale_r_cor_by.stan0000644000175000017500000000136714105230573020332 0ustar nileshnilesh /* compute correlated group-level effects with 'by' variables * Args: * z: matrix of unscaled group-level effects * SD: matrix of standard deviation parameters * L: an array of cholesky factor correlation matrices * Jby: index which grouping level belongs to which by level * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_by(matrix z, matrix SD, matrix[] L, int[] Jby) { // r is stored in another dimension order than z matrix[cols(z), rows(z)] r; matrix[rows(L[1]), cols(L[1])] LC[size(L)]; for (i in 1:size(LC)) { LC[i] = diag_pre_multiply(SD[, i], L[i]); } for (j in 1:rows(r)) { r[j] = transpose(LC[Jby[j]] * z[, j]); } return r; } brms/inst/chunks/fun_wiener_diffusion.stan0000644000175000017500000000125413202254050020710 0ustar nileshnilesh /* Wiener diffusion log-PDF for a single response * Args: * y: reaction time data * dec: decision data (0 or 1) * alpha: boundary separation parameter > 0 * tau: non-decision time parameter > 0 * beta: initial bias parameter in [0, 1] * delta: drift rate parameter * Returns: * a scalar to be added to the log posterior */ real wiener_diffusion_lpdf(real y, int dec, real alpha, real tau, real beta, real delta) { if (dec == 1) { return wiener_lpdf(y | alpha, tau, beta, delta); } else { return wiener_lpdf(y | alpha, tau, 1 - beta, - delta); } } brms/inst/chunks/fun_cholesky_cor_arma1.stan0000644000175000017500000000136114105230573021124 0ustar nileshnilesh /* compute the cholesky factor of an ARMA1 correlation matrix * Args: * ar: AR1 autocorrelation * ma: MA1 autocorrelation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows matrix */ matrix cholesky_cor_arma1(real ar, real ma, int nrows) { matrix[nrows, nrows] mat; vector[nrows] gamma; mat = diag_matrix(rep_vector(1 + ma^2 + 2 * ar * ma, nrows)); gamma[1] = (1 + ar * ma) * (ar + ma); for (i in 2:nrows) { gamma[i] = gamma[1] * pow(ar, i - 1); for (j in 1:(i - 1)) { mat[i, j] = gamma[i - j]; mat[j, i] = gamma[i - j]; } } return cholesky_decompose(mat ./ (1 - ar^2)); } brms/inst/chunks/fun_normal_time.stan0000644000175000017500000000535514111751667017705 0ustar nileshnilesh /* multi-normal log-PDF for 
time-series covariance structures * assuming homogoneous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation * chol_cor: cholesky factor of the correlation matrix * se2: square of user defined standard errors * should be set to zero if none are defined * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real normal_time_hom_lpdf(vector y, vector mu, real sigma, matrix chol_cor, data vector se2, int[] nobs, int[] begin, int[] end) { int I = size(nobs); int has_se = max(se2) > 0; vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] L; L = sigma * chol_cor[1:nobs[i], 1:nobs[i]]; if (has_se) { // need to add 'se' to the correlation matrix itself L = multiply_lower_tri_self_transpose(L); L += diag_matrix(se2[begin[i]:end[i]]); L = cholesky_decompose(L); } lp[i] = multi_normal_cholesky_lpdf( y[begin[i]:end[i]] | mu[begin[i]:end[i]], L ); } return sum(lp); } /* multi-normal log-PDF for time-series covariance structures * assuming heterogenous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation vector * chol_cor: cholesky factor of the correlation matrix * se2: square of user defined standard errors * should be set to zero if none are defined * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real normal_time_het_lpdf(vector y, vector mu, vector sigma, matrix chol_cor, data vector se2, int[] nobs, int[] begin, int[] end) { int I = size(nobs); int has_se = max(se2) > 0; vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] L; L = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); if (has_se) { // need to add 'se' to the correlation matrix itself L = multiply_lower_tri_self_transpose(L); L += diag_matrix(se2[begin[i]:end[i]]); L = cholesky_decompose(L); } lp[i] = multi_normal_cholesky_lpdf( y[begin[i]:end[i]] | mu[begin[i]:end[i]], L ); } return sum(lp); } brms/inst/chunks/fun_discrete_weibull.stan0000644000175000017500000000125514105230573020706 0ustar nileshnilesh /* discrete Weibull log-PMF for a single response * Args: * y: the response value * mu: location parameter on the unit interval * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real discrete_weibull_lpmf(int y, real mu, real shape) { return log(mu^y^shape - mu^(y+1)^shape); } // discrete Weibull log-CDF for a single response real discrete_weibull_lcdf(int y, real mu, real shape) { return log1m(mu^(y + 1)^shape); } // discrete Weibull log-CCDF for a single response real discrete_weibull_lccdf(int y, real mu, real shape) { return lmultiply((y + 1)^shape, mu); } brms/inst/chunks/fun_normal_fcor.stan0000644000175000017500000000202614111751667017670 0ustar nileshnilesh /* multi-normal log-PDF for fixed correlation matrices * assuming homogoneous variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real normal_fcor_hom_lpdf(vector y, vector mu, real sigma, data matrix chol_cor) { return multi_normal_cholesky_lpdf(y | mu, sigma * chol_cor); } /* multi-normal log-PDF for fixed correlation matrices * assuming heterogenous 
variances * Args: * y: response vector * mu: mean parameter vector * sigma: residual standard deviation vector * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real normal_fcor_het_lpdf(vector y, vector mu, vector sigma, data matrix chol_cor) { return multi_normal_cholesky_lpdf(y | mu, diag_pre_multiply(sigma, chol_cor)); } brms/inst/chunks/fun_gaussian_process.stan0000644000175000017500000000166614111751667020750 0ustar nileshnilesh /* compute a latent Gaussian process * Args: * x: array of continuous predictor values * sdgp: marginal SD parameter * lscale: length-scale parameter * zgp: vector of independent standard normal variables * Returns: * a vector to be added to the linear predictor */ vector gp(data vector[] x, real sdgp, vector lscale, vector zgp) { int Dls = rows(lscale); int N = size(x); matrix[N, N] cov; if (Dls == 1) { // one dimensional or isotropic GP cov = gp_exp_quad_cov(x, sdgp, lscale[1]); } else { // multi-dimensional non-isotropic GP cov = gp_exp_quad_cov(x[, 1], sdgp, lscale[1]); for (d in 2:Dls) { cov = cov .* gp_exp_quad_cov(x[, d], 1, lscale[d]); } } for (n in 1:N) { // deal with numerical non-positive-definiteness cov[n, n] += 1e-12; } return cholesky_decompose(cov) * zgp; } brms/inst/chunks/fun_hurdle_gamma.stan0000644000175000017500000000276413277406136020025 0ustar nileshnilesh /* hurdle gamma log-PDF of a single response * Args: * y: the response value * alpha: shape parameter of the gamma distribution * beta: rate parameter of the gamma distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_gamma_lpdf(real y, real alpha, real beta, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + gamma_lpdf(y | alpha, beta); } } /* hurdle gamma log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * alpha: shape parameter of the gamma distribution * beta: rate parameter of the gamma distribution * hu: linear predictor for the hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_gamma_logit_lpdf(real y, real alpha, real beta, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + gamma_lpdf(y | alpha, beta); } } // hurdle gamma log-CCDF and log-CDF functions real hurdle_gamma_lccdf(real y, real alpha, real beta, real hu) { return bernoulli_lpmf(0 | hu) + gamma_lccdf(y | alpha, beta); } real hurdle_gamma_lcdf(real y, real alpha, real beta, real hu) { return log1m_exp(hurdle_gamma_lccdf(y | alpha, beta, hu)); } brms/inst/chunks/fun_tan_half.stan0000644000175000017500000000063413202254050017126 0ustar nileshnilesh /* compute the tan_half link * Args: * x: a scalar in (-pi, pi) * Returns: * a scalar in (-Inf, Inf) */ real tan_half(real x) { return tan(x / 2); } /* compute the inverse of the tan_half link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a scalar in (-pi, pi) */ real inv_tan_half(real y) { return 2 * atan(y); } brms/inst/chunks/fun_zero_inflated_beta.stan0000644000175000017500000000334413277406647021222 0ustar nileshnilesh /* zero-inflated beta log-PDF of a single response * Args: * y: the response value * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_lpdf(real y, real mu, real phi, real zi) { 
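  // mean-precision parameterization: build the two standard beta shape
  // parameters from mu (mean) and phi (precision)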
row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_lpmf(1 | zi); } else { return bernoulli_lpmf(0 | zi) + beta_lpdf(y | shape[1], shape[2]); } } /* zero-inflated beta log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * mu: mean parameter of the beta distribution * phi: precision parameter of the beta distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_beta_logit_lpdf(real y, real mu, real phi, real zi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; if (y == 0) { return bernoulli_logit_lpmf(1 | zi); } else { return bernoulli_logit_lpmf(0 | zi) + beta_lpdf(y | shape[1], shape[2]); } } // zero-inflated beta log-CCDF and log-CDF functions real zero_inflated_beta_lccdf(real y, real mu, real phi, real zi) { row_vector[2] shape = [mu * phi, (1 - mu) * phi]; return bernoulli_lpmf(0 | zi) + beta_lccdf(y | shape[1], shape[2]); } real zero_inflated_beta_lcdf(real y, real mu, real phi, real zi) { return log1m_exp(zero_inflated_beta_lccdf(y | mu, phi, zi)); } brms/inst/chunks/fun_normal_errorsar.stan0000644000175000017500000000153114111751667020576 0ustar nileshnilesh /* normal log-pdf for spatially lagged residuals * Args: * y: the response vector * mu: mean parameter vector * sigma: residual standard deviation * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real normal_errorsar_lpdf(vector y, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * (y - mu); log_det = sum(log1m(rho * eigenW)); return 0.5 * N * log(inv_sigma2) + log_det - 0.5 * dot_self(half_pred) * inv_sigma2; } brms/inst/chunks/fun_cholesky_cor_cosy.stan0000644000175000017500000000110213625764732021110 0ustar nileshnilesh /* compute the cholesky factor of a compound symmetry correlation matrix * Args: * cosy: compound symmetry correlation * nrows: number of rows of the covariance matrix * Returns: * A nrows x nrows covariance matrix */ matrix cholesky_cor_cosy(real cosy, int nrows) { matrix[nrows, nrows] mat; mat = diag_matrix(rep_vector(1, nrows)); for (i in 2:nrows) { for (j in 1:(i - 1)) { mat[i, j] = cosy; mat[j, i] = mat[i, j]; } } return cholesky_decompose(mat); } brms/inst/chunks/fun_softplus.stan0000644000175000017500000000032613625764732017255 0ustar nileshnilesh /* softplus link function inverse to 'log1p_exp' * Args: * x: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real log_expm1(real x) { return log(expm1(x)); } brms/inst/chunks/fun_which_range.stan0000644000175000017500000000232514105230573017636 0ustar nileshnilesh /* how many elements are in a range of integers? * Args: * x: an integer array * start: start of the range (inclusive) * end: end of the range (inclusive) * Returns: * a scalar integer */ int size_range(int[] x, int start, int end) { int out = 0; for (i in 1:size(x)) { out += (x[i] >= start && x[i] <= end); } return out; } /* which elements are in a range of integers? 
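   * (i.e., the positions i for which start <= x[i] <= end)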
* Args: * x: an integer array * start: start of the range (inclusive) * end: end of the range (inclusive) * Returns: * an integer array */ int[] which_range(int[] x, int start, int end) { int out[size_range(x, start, end)]; int j = 1; for (i in 1:size(x)) { if (x[i] >= start && x[i] <= end) { out[j] = i; j += 1; } } return out; } /* adjust array values to x - start + 1 * Args: * x: an integer array * start: start of the range of values in x (inclusive) * Returns: * an integer array */ int[] start_at_one(int[] x, int start) { int out[size(x)]; for (i in 1:size(x)) { out[i] = x[i] - start + 1; } return out; } brms/inst/chunks/fun_student_t_errorsar.stan0000644000175000017500000000204014111751667021313 0ustar nileshnilesh /* student-t log-pdf for spatially lagged residuals * Args: * y: the response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: residual scale parameter * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real student_t_errorsar_lpdf(vector y, real nu, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real K = rows(y); // avoid integer division warning real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * (y - mu); log_det = sum(log1m(rho * eigenW)); return - K / 2 * log(nu) + lgamma((nu + K) / 2) - lgamma(nu / 2) + 0.5 * K * log(inv_sigma2) + log_det - (nu + K) / 2 * log1p(dot_self(half_pred) * inv_sigma2 / nu); } brms/inst/chunks/fun_monotonic.stan0000644000175000017500000000047413625764732017407 0ustar nileshnilesh /* compute monotonic effects * Args: * scale: a simplex parameter * i: index to sum over the simplex * Returns: * a scalar between 0 and 1 */ real mo(vector scale, int i) { if (i == 0) { return 0; } else { return rows(scale) * sum(scale[1:i]); } } brms/inst/chunks/fun_asym_laplace.stan0000644000175000017500000000354614105230573020020 0ustar nileshnilesh /* helper function for asym_laplace_lpdf * Args: * y: the response value * quantile: quantile parameter in (0, 1) */ real rho_quantile(real y, real quantile) { if (y < 0) { return y * (quantile - 1); } else { return y * quantile; } } /* asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lpdf(real y, real mu, real sigma, real quantile) { return log(quantile * (1 - quantile)) - log(sigma) - rho_quantile((y - mu) / sigma, quantile); } /* asymmetric laplace log-CDF for a single quantile * Args: * y: a quantile * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lcdf(real y, real mu, real sigma, real quantile) { if (y < mu) { return log(quantile) + (1 - quantile) * (y - mu) / sigma; } else { return log1m((1 - quantile) * exp(-quantile * (y - mu) / sigma)); } } /* asymmetric laplace log-CCDF for a single quantile * Args: * y: a quantile * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * Returns: * a scalar to be added to the log posterior */ real asym_laplace_lccdf(real y, real mu, real sigma, real quantile) { if (y < mu) { return log1m(quantile * exp((1 - quantile) * (y - mu) / 
sigma)); } else { return log1m(quantile) - quantile * (y - mu) / sigma; } } brms/inst/chunks/fun_hurdle_negbinomial.stan0000644000175000017500000000623514105230573021213 0ustar nileshnilesh /* hurdle negative binomial log-PDF of a single response * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_lpmf(int y, real mu, real phi, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lpmf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * logit parameterization for the hurdle part * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: phi parameter of negative binomial distribution * hu: linear predictor of hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_logit_lpmf(int y, real mu, real phi, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + neg_binomial_2_lpmf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * log parameterization for the negative binomial part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi phi parameter of negative binomial distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_log_lpmf(int y, real eta, real phi, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + neg_binomial_2_log_lpmf(y | eta, phi) - log1m((phi / (exp(eta) + phi))^phi); } } /* hurdle negative binomial log-PDF of a single response * log parameterization for the negative binomial part * logit parameterization for the hurdle part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: phi parameter of negative binomial distribution * hu: linear predictor of hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_neg_binomial_log_logit_lpmf(int y, real eta, real phi, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + neg_binomial_2_log_lpmf(y | eta, phi) - log1m((phi / (exp(eta) + phi))^phi); } } // hurdle negative binomial log-CCDF and log-CDF functions real hurdle_neg_binomial_lccdf(int y, real mu, real phi, real hu) { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lccdf(y | mu, phi) - log1m((phi / (mu + phi))^phi); } real hurdle_neg_binomial_lcdf(int y, real mu, real phi, real hu) { return log1m_exp(hurdle_neg_binomial_lccdf(y | mu, phi, hu)); } brms/inst/chunks/fun_scale_xi.stan0000644000175000017500000000213013254660260017145 0ustar nileshnilesh /* scale auxiliary parameter xi to a suitable region * expecting sigma to be a scalar * Args: * xi: unscaled shape parameter * y: response values * mu: location parameter * sigma: scale parameter * Returns: * scaled shape parameter xi */ real scale_xi(real xi, vector y, vector mu, real sigma) { vector[rows(y)] x = (y - mu) / sigma; vector[2] bounds = [-inv(min(x)), -inv(max(x))]'; real lb = min(bounds); real ub = max(bounds); return inv_logit(xi) * (ub - lb) + lb; } /* scale auxiliary parameter xi to a suitable region * expecting sigma to be a vector * Args: * xi: unscaled 
shape parameter * y: response values * mu: location parameter * sigma: scale parameter * Returns: * scaled shape parameter xi */ real scale_xi_vector(real xi, vector y, vector mu, vector sigma) { vector[rows(y)] x = (y - mu) ./ sigma; vector[2] bounds = [-inv(min(x)), -inv(max(x))]'; real lb = min(bounds); real ub = max(bounds); return inv_logit(xi) * (ub - lb) + lb; } brms/inst/chunks/fun_hurdle_poisson.stan0000644000175000017500000000520213277405012020413 0ustar nileshnilesh /* hurdle poisson log-PDF of a single response * Args: * y: the response value * lambda: mean parameter of the poisson distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_lpmf(int y, real lambda, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + poisson_lpmf(y | lambda) - log1m_exp(-lambda); } } /* hurdle poisson log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * lambda: mean parameter of the poisson distribution * hu: linear predictor for hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_logit_lpmf(int y, real lambda, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + poisson_lpmf(y | lambda) - log1m_exp(-lambda); } } /* hurdle poisson log-PDF of a single response * log parameterization for the poisson part * Args: * y: the response value * eta: linear predictor for poisson part * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_log_lpmf(int y, real eta, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + poisson_log_lpmf(y | eta) - log1m_exp(-exp(eta)); } } /* hurdle poisson log-PDF of a single response * log parameterization for the poisson part * logit parameterization of the hurdle part * Args: * y: the response value * eta: linear predictor for poisson part * hu: linear predictor for hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_poisson_log_logit_lpmf(int y, real eta, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + poisson_log_lpmf(y | eta) - log1m_exp(-exp(eta)); } } // hurdle poisson log-CCDF and log-CDF functions real hurdle_poisson_lccdf(int y, real lambda, real hu) { return bernoulli_lpmf(0 | hu) + poisson_lccdf(y | lambda) - log1m_exp(-lambda); } real hurdle_poisson_lcdf(int y, real lambda, real hu) { return log1m_exp(hurdle_poisson_lccdf(y | lambda, hu)); } brms/inst/chunks/fun_student_t_lagsar.stan0000644000175000017500000000203414111751667020730 0ustar nileshnilesh /* student-t log-pdf for spatially lagged responses * Args: * y: the response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: residual scale parameter * rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real student_t_lagsar_lpdf(vector y, real nu, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real K = rows(y); // avoid integer division warning real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * y - mu; log_det = sum(log1m(rho * eigenW)); return - K / 2 * log(nu) + lgamma((nu + K) / 2) - lgamma(nu / 2) + 0.5 * K * 
log(inv_sigma2) + log_det - (nu + K) / 2 * log1p(dot_self(half_pred) * inv_sigma2 / nu); } brms/inst/chunks/fun_cauchit.stan0000644000175000017500000000064513202254050016774 0ustar nileshnilesh /* compute the cauchit link * Args: * p: a scalar in (0, 1) * Returns: * a scalar in (-Inf, Inf) */ real cauchit(real p) { return tan(pi() * (p - 0.5)); } /* compute the inverse of the cauchit link * Args: * y: a scalar in (-Inf, Inf) * Returns: * a scalar in (0, 1) */ real inv_cauchit(real y) { return cauchy_cdf(y, 0, 1); } brms/inst/chunks/fun_zero_inflated_asym_laplace.stan0000644000175000017500000000455213625764732022742 0ustar nileshnilesh /* zero-inflated asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_asym_laplace_lpdf(real y, real mu, real sigma, real quantile, real zi) { if (y == 0) { return bernoulli_lpmf(1 | zi); } else { return bernoulli_lpmf(0 | zi) + asym_laplace_lpdf(y | mu, sigma, quantile); } } /* zero-inflated asymmetric laplace log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: positive scale parameter * quantile: quantile parameter in (0, 1) * zi: linear predictor of the zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_asym_laplace_logit_lpdf(real y, real mu, real sigma, real quantile, real zi) { if (y == 0) { return bernoulli_logit_lpmf(1 | zi); } else { return bernoulli_logit_lpmf(0 | zi) + asym_laplace_lpdf(y | mu, sigma, quantile); } } // zero-inflated asymmetric laplace log-CDF function real zero_inflated_asym_laplace_lcdf(real y, real mu, real sigma, real quantile, real zi) { if (y < 0) { return bernoulli_lpmf(0 | zi) + asym_laplace_lcdf(y | mu, sigma, quantile); } else { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + asym_laplace_lcdf(y | mu, sigma, quantile)); } } // zero-inflated asymmetric laplace log-CCDF function real zero_inflated_asym_laplace_lccdf(real y, real mu, real sigma, real quantile, real zi) { if (y > 0) { return bernoulli_lpmf(0 | zi) + asym_laplace_lccdf(y | mu, sigma, quantile); } else { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + asym_laplace_lccdf(y | mu, sigma, quantile)); } } brms/inst/chunks/fun_student_t_time.stan0000644000175000017500000000534014111751667020420 0ustar nileshnilesh /* multi-student-t log-PDF for time-series covariance structures * assuming homogoneous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter * chol_cor: cholesky factor of the correlation matrix * se2: square of user defined standard errors * should be set to zero if none are defined * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_hom_lpdf(vector y, real nu, vector mu, real sigma, matrix chol_cor, data vector se2, int[] nobs, int[] begin, int[] end) { int I = size(nobs); int has_se = max(se2) > 0; vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov; Cov = sigma * chol_cor[1:nobs[i], 1:nobs[i]]; Cov = multiply_lower_tri_self_transpose(Cov); if (has_se) { Cov += diag_matrix(se2[begin[i]:end[i]]); } lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | 
nu, mu[begin[i]:end[i]], Cov ); } return sum(lp); } /* multi-student-t log-PDF for time-series covariance structures * assuming heterogenous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter vector * chol_cor: cholesky factor of the correlation matrix * se2: square of user defined standard errors * should be set to zero if none are defined * nobs: number of observations in each group * begin: the first observation in each group * end: the last observation in each group * Returns: * sum of the log-PDF values of all observations */ real student_t_time_het_lpdf(vector y, real nu, vector mu, vector sigma, matrix chol_cor, data vector se2, int[] nobs, int[] begin, int[] end) { int I = size(nobs); int has_se = max(se2) > 0; vector[I] lp; for (i in 1:I) { matrix[nobs[i], nobs[i]] Cov; Cov = diag_pre_multiply(sigma[begin[i]:end[i]], chol_cor[1:nobs[i], 1:nobs[i]]); Cov = multiply_lower_tri_self_transpose(Cov); if (has_se) { Cov += diag_matrix(se2[begin[i]:end[i]]); } lp[i] = multi_student_t_lpdf( y[begin[i]:end[i]] | nu, mu[begin[i]:end[i]], Cov ); } return sum(lp); } brms/inst/chunks/fun_zero_inflated_poisson.stan0000644000175000017500000000610413277404740021766 0ustar nileshnilesh /* zero-inflated poisson log-PDF of a single response * Args: * y: the response value * lambda: mean parameter of the poisson distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_lpmf(int y, real lambda, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + poisson_lpmf(0 | lambda)); } else { return bernoulli_lpmf(0 | zi) + poisson_lpmf(y | lambda); } } /* zero-inflated poisson log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * lambda: mean parameter of the poisson distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_logit_lpmf(int y, real lambda, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + poisson_lpmf(0 | lambda)); } else { return bernoulli_logit_lpmf(0 | zi) + poisson_lpmf(y | lambda); } } /* zero-inflated poisson log-PDF of a single response * log parameterization for the poisson part * Args: * y: the response value * eta: linear predictor for poisson distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_log_lpmf(int y, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + poisson_log_lpmf(0 | eta)); } else { return bernoulli_lpmf(0 | zi) + poisson_log_lpmf(y | eta); } } /* zero-inflated poisson log-PDF of a single response * log parameterization for the poisson part * logit parameterization of the zero-inflation part * Args: * y: the response value * eta: linear predictor for poisson distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_poisson_log_logit_lpmf(int y, real eta, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + poisson_log_lpmf(0 | eta)); } else { return bernoulli_logit_lpmf(0 | zi) + poisson_log_lpmf(y | eta); } } // zero-inflated poisson log-CCDF and log-CDF functions real zero_inflated_poisson_lccdf(int y, real lambda, real zi) { 
return bernoulli_lpmf(0 | zi) + poisson_lccdf(y | lambda); } real zero_inflated_poisson_lcdf(int y, real lambda, real zi) { return log1m_exp(zero_inflated_poisson_lccdf(y | lambda, zi)); } brms/inst/chunks/fun_scale_r_cor_by_cov.stan0000644000175000017500000000336114105230573021175 0ustar nileshnilesh /* compute correlated group-level effects with 'by' variables * in the presence of a within-group covariance matrix * Args: * z: matrix of unscaled group-level effects * SD: matrix of standard deviation parameters * L: an array of cholesky factor correlation matrices * Jby: index which grouping level belongs to which by level * Lcov: cholesky factor of within-group correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor_by_cov(matrix z, matrix SD, matrix[] L, int[] Jby, matrix Lcov) { vector[num_elements(z)] z_flat = to_vector(z); vector[num_elements(z)] r = rep_vector(0, num_elements(z)); matrix[rows(L[1]), cols(L[1])] LC[size(L)]; int rows_z = rows(z); int rows_L = rows(L[1]); for (i in 1:size(LC)) { LC[i] = diag_pre_multiply(SD[, i], L[i]); } // kronecker product of cholesky factors times a vector for (icov in 1:rows(Lcov)) { for (jcov in 1:icov) { if (Lcov[icov, jcov] > 1e-10) { // avoid calculating products between unrelated individuals for (i in 1:rows_L) { for (j in 1:i) { // incremented element of the output vector int k = (rows_L * (icov - 1)) + i; // applied element of the input vector int l = (rows_L * (jcov - 1)) + j; // column number of z to which z_flat[l] belongs int m = (l - 1) / rows_z + 1; r[k] = r[k] + Lcov[icov, jcov] * LC[Jby[m]][i, j] * z_flat[l]; } } } } } // r is returned in another dimension order than z return to_matrix(r, cols(z), rows(z), 0); } brms/inst/chunks/fun_student_t_fcor.stan0000644000175000017500000000257214111751667020417 0ustar nileshnilesh /* multi-student-t log-PDF for fixed correlation matrices * assuming homogoneous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real student_t_fcor_hom_lpdf(vector y, real nu, vector mu, real sigma, data matrix chol_cor) { int N = rows(chol_cor); matrix[N, N] Cov = multiply_lower_tri_self_transpose(sigma * chol_cor); return multi_student_t_lpdf(y | nu, mu, Cov); } /* multi-student-t log-PDF for fixed correlation matrices * assuming heterogenous variances * Args: * y: response vector * nu: degrees of freedom parameter * mu: mean parameter vector * sigma: scale parameter vector * chol_cor: cholesky factor of the correlation matrix * Returns: * sum of the log-PDF values of all observations */ real student_t_fcor_het_lpdf(vector y, real nu, vector mu, vector sigma, data matrix chol_cor) { int N = rows(chol_cor); matrix[N, N] Cov = diag_pre_multiply(sigma, chol_cor); Cov = multiply_lower_tri_self_transpose(Cov); return multi_student_t_lpdf(y | nu, mu, Cov); } brms/inst/chunks/fun_scale_r_cor.stan0000644000175000017500000000066214105230573017635 0ustar nileshnilesh /* compute correlated group-level effects * Args: * z: matrix of unscaled group-level effects * SD: vector of standard deviation parameters * L: cholesky factor correlation matrix * Returns: * matrix of scaled group-level effects */ matrix scale_r_cor(matrix z, vector SD, matrix L) { // r is stored in another dimension order than z return transpose(diag_pre_multiply(SD, L) * z); } 
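  /* Usage note for scale_r_cor (a sketch of the expected shapes):
   * for a grouping factor with K coefficients and J levels, z is a K x J
   * matrix of independent standard normal draws, SD a vector of K standard
   * deviations, and L the K x K Cholesky factor of the correlation matrix.
   * The returned matrix is J x K, so each of its rows follows a multivariate
   * normal distribution with covariance diag_matrix(SD) * L * L' * diag_matrix(SD).
   */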
brms/inst/chunks/fun_cox.stan0000644000175000017500000000276014010776134016157 0ustar nileshnilesh /* distribution functions of the Cox proportional hazards model * parameterize hazard(t) = baseline(t) * mu * so that higher values of 'mu' imply lower survival times * Args: * y: the response value; currently ignored as the relevant * information is passed via 'bhaz' and 'cbhaz' * mu: positive location parameter * bhaz: baseline hazard * cbhaz: cumulative baseline hazard */ real cox_lhaz(real y, real mu, real bhaz, real cbhaz) { return log(bhaz) + log(mu); } real cox_lccdf(real y, real mu, real bhaz, real cbhaz) { // equivalent to the log survival function return - cbhaz * mu; } real cox_lcdf(real y, real mu, real bhaz, real cbhaz) { return log1m_exp(cox_lccdf(y | mu, bhaz, cbhaz)); } real cox_lpdf(real y, real mu, real bhaz, real cbhaz) { return cox_lhaz(y, mu, bhaz, cbhaz) + cox_lccdf(y | mu, bhaz, cbhaz); } // Distribution functions of the Cox model in log parameterization real cox_log_lhaz(real y, real log_mu, real bhaz, real cbhaz) { return log(bhaz) + log_mu; } real cox_log_lccdf(real y, real log_mu, real bhaz, real cbhaz) { return - cbhaz * exp(log_mu); } real cox_log_lcdf(real y, real log_mu, real bhaz, real cbhaz) { return log1m_exp(cox_log_lccdf(y | log_mu, bhaz, cbhaz)); } real cox_log_lpdf(real y, real log_mu, real bhaz, real cbhaz) { return cox_log_lhaz(y, log_mu, bhaz, cbhaz) + cox_log_lccdf(y | log_mu, bhaz, cbhaz); } brms/inst/chunks/fun_zero_inflated_negbinomial.stan0000644000175000017500000000740013277405036022557 0ustar nileshnilesh /* zero-inflated negative binomial log-PDF of a single response * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_lpmf(int y, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + neg_binomial_2_lpmf(0 | mu, phi)); } else { return bernoulli_lpmf(0 | zi) + neg_binomial_2_lpmf(y | mu, phi); } } /* zero-inflated negative binomial log-PDF of a single response * logit parameterization of the zero-inflation part * Args: * y: the response value * mu: mean parameter of negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_logit_lpmf(int y, real mu, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_lpmf(0 | mu, phi)); } else { return bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_lpmf(y | mu, phi); } } /* zero-inflated negative binomial log-PDF of a single response * log parameterization for the negative binomial part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: zero-inflation probability * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_log_lpmf(int y, real eta, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_lpmf(1 | zi), bernoulli_lpmf(0 | zi) + neg_binomial_2_log_lpmf(0 | eta, phi)); } else { return bernoulli_lpmf(0 | zi) + neg_binomial_2_log_lpmf(y | eta, phi); } } /* zero-inflated negative binomial log-PDF of a single response * log parameterization for the negative 
binomial part * logit parameterization of the zero-inflation part * Args: * y: the response value * eta: linear predictor for negative binomial distribution * phi: shape parameter of negative binomial distribution * zi: linear predictor for zero-inflation part * Returns: * a scalar to be added to the log posterior */ real zero_inflated_neg_binomial_log_logit_lpmf(int y, real eta, real phi, real zi) { if (y == 0) { return log_sum_exp(bernoulli_logit_lpmf(1 | zi), bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_log_lpmf(0 | eta, phi)); } else { return bernoulli_logit_lpmf(0 | zi) + neg_binomial_2_log_lpmf(y | eta, phi); } } // zero_inflated negative binomial log-CCDF and log-CDF functions real zero_inflated_neg_binomial_lccdf(int y, real mu, real phi, real hu) { return bernoulli_lpmf(0 | hu) + neg_binomial_2_lccdf(y | mu, phi); } real zero_inflated_neg_binomial_lcdf(int y, real mu, real phi, real hu) { return log1m_exp(zero_inflated_neg_binomial_lccdf(y | mu, phi, hu)); } brms/inst/chunks/fun_gaussian_process_approx.stan0000644000175000017500000000215614111751667022334 0ustar nileshnilesh /* Spectral density function of a Gaussian process * with squared exponential covariance kernel * Args: * x: array of numeric values of dimension NB x D * sdgp: marginal SD parameter * lscale: vector of length-scale parameters * Returns: * numeric values of the function evaluated at 'x' */ vector spd_cov_exp_quad(data vector[] x, real sdgp, vector lscale) { int NB = dims(x)[1]; int D = dims(x)[2]; int Dls = rows(lscale); vector[NB] out; if (Dls == 1) { // one dimensional or isotropic GP real constant = square(sdgp) * (sqrt(2 * pi()) * lscale[1])^D; real neg_half_lscale2 = -0.5 * square(lscale[1]); for (m in 1:NB) { out[m] = constant * exp(neg_half_lscale2 * dot_self(x[m])); } } else { // multi-dimensional non-isotropic GP real constant = square(sdgp) * sqrt(2 * pi())^D * prod(lscale); vector[Dls] neg_half_lscale2 = -0.5 * square(lscale); for (m in 1:NB) { out[m] = constant * exp(dot_product(neg_half_lscale2, square(x[m]))); } } return out; } brms/inst/chunks/fun_gen_extreme_value.stan0000644000175000017500000000264513254660260021067 0ustar nileshnilesh /* generalized extreme value log-PDF for a single response * Args: * y: the response value * mu: location parameter * sigma: scale parameter * xi: shape parameter * Returns: * a scalar to be added to the log posterior */ real gen_extreme_value_lpdf(real y, real mu, real sigma, real xi) { real x = (y - mu) / sigma; if (xi == 0) { return - log(sigma) - x - exp(-x); } else { real t = 1 + xi * x; real inv_xi = 1 / xi; return - log(sigma) - (1 + inv_xi) * log(t) - pow(t, -inv_xi); } } /* generalized extreme value log-CDF for a single response * Args: * y: a quantile * mu: location parameter * sigma: scale parameter * xi: shape parameter * Returns: * log(P(Y <= y)) */ real gen_extreme_value_lcdf(real y, real mu, real sigma, real xi) { real x = (y - mu) / sigma; if (xi == 0) { return - exp(-x); } else { return - pow(1 + xi * x, - 1 / xi); } } /* generalized extreme value log-CCDF for a single response * Args: * y: a quantile * mu: location parameter * sigma: scale parameter * xi: shape parameter * Returns: * log(P(Y > y)) */ real gen_extreme_value_lccdf(real y, real mu, real sigma, real xi) { return log1m_exp(gen_extreme_value_lcdf(y | mu, sigma, xi)); } brms/inst/chunks/fun_squareplus.stan0000644000175000017500000000070314111751667017573 0ustar nileshnilesh /* squareplus inverse link function (squareplus itself) * Args: * x: a scalar in (-Inf, Inf) * Returns: * a 
positive scalar */ real squareplus(real x) { return (x + sqrt(x^2 + 4)) / 2; } /* squareplus link function (inverse squareplus) * Args: * x: a positive scalar * Returns: * a scalar in (-Inf, Inf) */ real inv_squareplus(real x) { return (x^2 - 1) / x; } brms/inst/chunks/fun_hurdle_lognormal.stan0000644000175000017500000000302713277406150020722 0ustar nileshnilesh /* hurdle lognormal log-PDF of a single response * Args: * y: the response value * mu: mean parameter of the lognormal distribution * sigma: sd parameter of the lognormal distribution * hu: hurdle probability * Returns: * a scalar to be added to the log posterior */ real hurdle_lognormal_lpdf(real y, real mu, real sigma, real hu) { if (y == 0) { return bernoulli_lpmf(1 | hu); } else { return bernoulli_lpmf(0 | hu) + lognormal_lpdf(y | mu, sigma); } } /* hurdle lognormal log-PDF of a single response * logit parameterization of the hurdle part * Args: * y: the response value * mu: mean parameter of the lognormal distribution * sigma: sd parameter of the lognormal distribution * hu: linear predictor for the hurdle part * Returns: * a scalar to be added to the log posterior */ real hurdle_lognormal_logit_lpdf(real y, real mu, real sigma, real hu) { if (y == 0) { return bernoulli_logit_lpmf(1 | hu); } else { return bernoulli_logit_lpmf(0 | hu) + lognormal_lpdf(y | mu, sigma); } } // hurdle lognormal log-CCDF and log-CDF functions real hurdle_lognormal_lccdf(real y, real mu, real sigma, real hu) { return bernoulli_lpmf(0 | hu) + lognormal_lccdf(y | mu, sigma); } real hurdle_lognormal_lcdf(real y, real mu, real sigma, real hu) { return log1m_exp(hurdle_lognormal_lccdf(y | mu, sigma, hu)); } brms/inst/chunks/fun_sequence.stan0000644000175000017500000000051614060727052017173 0ustar nileshnilesh /* integer sequence of values * Args: * start: starting integer * end: ending integer * Returns: * an integer sequence from start to end */ int[] sequence(int start, int end) { int seq[end - start + 1]; for (n in 1:num_elements(seq)) { seq[n] = n + start - 1; } return seq; } brms/inst/chunks/fun_von_mises.stan0000644000175000017500000000216213715703350017365 0ustar nileshnilesh /* von Mises log-PDF of a single response * for kappa > 100 the normal approximation is used * for reasons of numerial stability * Args: * y: the response vector between -pi and pi * mu: location parameter vector * kappa: precision parameter * Returns: * a scalar to be added to the log posterior */ real von_mises_real_lpdf(real y, real mu, real kappa) { if (kappa < 100) { return von_mises_lpdf(y | mu, kappa); } else { return normal_lpdf(y | mu, sqrt(1 / kappa)); } } /* von Mises log-PDF of a response vector * for kappa > 100 the normal approximation is used * for reasons of numerial stability * Args: * y: the response vector between -pi and pi * mu: location parameter vector * kappa: precision parameter * Returns: * a scalar to be added to the log posterior */ real von_mises_vector_lpdf(vector y, vector mu, real kappa) { if (kappa < 100) { return von_mises_lpdf(y | mu, kappa); } else { return normal_lpdf(y | mu, sqrt(1 / kappa)); } } brms/inst/chunks/fun_sparse_icar_lpdf.stan0000644000175000017500000000261014111751667020666 0ustar nileshnilesh /* Return the log probability of an intrinsic conditional autoregressive * (ICAR) prior with a sparse representation for the adjacency matrix * Full credit to Max Joseph (https://github.com/mbjoseph/CARstan) * Args: * phi: Vector containing the CAR parameters for each location * sdcar: Standard deviation parameter for the CAR prior * 
Nloc: Number of locations * Nedges: Number of edges (adjacency pairs) * Nneigh: Number of neighbors for each location * eigenW: Eigenvalues of D^(-1/2) * W * D^(-1/2) * edges1, edges2: Sparse representation of adjacency matrix * Details: * D = Diag(Nneigh) * Returns: * Log probability density of CAR prior up to additive constant */ real sparse_icar_lpdf(vector phi, real sdcar, int Nloc, int Nedges, data vector Nneigh, data vector eigenW, int[] edges1, int[] edges2) { real tau; // precision parameter row_vector[Nloc] phit_D; // phi' * D row_vector[Nloc] phit_W; // phi' * W tau = inv_square(sdcar); phit_D = (phi .* Nneigh)'; phit_W = rep_row_vector(0, Nloc); for (i in 1:Nedges) { phit_W[edges1[i]] = phit_W[edges1[i]] + phi[edges2[i]]; phit_W[edges2[i]] = phit_W[edges2[i]] + phi[edges1[i]]; } return 0.5 * ((Nloc - 1) * log(tau) - tau * (phit_D * phi - (phit_W * phi))); } brms/inst/chunks/fun_sparse_car_lpdf.stan0000644000175000017500000000307214111751667020520 0ustar nileshnilesh /* Return the log probability of a proper conditional autoregressive (CAR) * prior with a sparse representation for the adjacency matrix * Full credit to Max Joseph (https://github.com/mbjoseph/CARstan) * Args: * phi: Vector containing the CAR parameters for each location * car: Dependence (usually spatial) parameter for the CAR prior * sdcar: Standard deviation parameter for the CAR prior * Nloc: Number of locations * Nedges: Number of edges (adjacency pairs) * Nneigh: Number of neighbors for each location * eigenW: Eigenvalues of D^(-1/2) * W * D^(-1/2) * edges1, edges2: Sparse representation of adjacency matrix * Details: * D = Diag(Nneigh) * Returns: * Log probability density of CAR prior up to additive constant */ real sparse_car_lpdf(vector phi, real car, real sdcar, int Nloc, int Nedges, data vector Nneigh, data vector eigenW, int[] edges1, int[] edges2) { real tau; // precision parameter row_vector[Nloc] phit_D; // phi' * D row_vector[Nloc] phit_W; // phi' * W vector[Nloc] ldet; tau = inv_square(sdcar); phit_D = (phi .* Nneigh)'; phit_W = rep_row_vector(0, Nloc); for (i in 1:Nedges) { phit_W[edges1[i]] = phit_W[edges1[i]] + phi[edges2[i]]; phit_W[edges2[i]] = phit_W[edges2[i]] + phi[edges1[i]]; } for (i in 1:Nloc) { ldet[i] = log1m(car * eigenW[i]); } return 0.5 * (Nloc * log(tau) + sum(ldet) - tau * (phit_D * phi - car * (phit_W * phi))); } brms/inst/chunks/fun_cloglog.stan0000644000175000017500000000030314105230573017000 0ustar nileshnilesh /* compute the cloglog link * Args: * p: a scalar in (0, 1) * Returns: * a scalar in (-Inf, Inf) */ real cloglog(real p) { return log(-log1m(p)); } brms/inst/chunks/fun_horseshoe.stan0000644000175000017500000000121513701270367017362 0ustar nileshnilesh /* Efficient computation of the horseshoe prior * see Appendix C.1 in https://projecteuclid.org/euclid.ejs/1513306866 * Args: * z: standardized population-level coefficients * lambda: local shrinkage parameters * tau: global shrinkage parameter * c2: slap regularization parameter * Returns: * population-level coefficients following the horseshoe prior */ vector horseshoe(vector z, vector lambda, real tau, real c2) { int K = rows(z); vector[K] lambda2 = square(lambda); vector[K] lambda_tilde = sqrt(c2 * lambda2 ./ (c2 + tau^2 * lambda2)); return z .* lambda_tilde * tau; } brms/inst/chunks/fun_normal_lagsar.stan0000644000175000017500000000152414111751667020212 0ustar nileshnilesh /* normal log-pdf for spatially lagged responses * Args: * y: the response vector * mu: mean parameter vector * sigma: residual standard deviation 
* rho: positive autoregressive parameter * W: spatial weight matrix * eigenW: precomputed eigenvalues of W * Returns: * a scalar to be added to the log posterior */ real normal_lagsar_lpdf(vector y, vector mu, real sigma, real rho, data matrix W, data vector eigenW) { int N = rows(y); real inv_sigma2 = inv_square(sigma); matrix[N, N] W_tilde = add_diag(-rho * W, 1); vector[N] half_pred; real log_det; half_pred = W_tilde * y - mu; log_det = sum(log1m(rho * eigenW)); return 0.5 * N * log(inv_sigma2) + log_det - 0.5 * dot_self(half_pred) * inv_sigma2; } brms/inst/chunks/fun_com_poisson.stan0000644000175000017500000000774714105230573017725 0ustar nileshnilesh // log approximate normalizing constant of the COM poisson distribuion // approximation based on doi:10.1007/s10463-017-0629-6 // Args: see log_Z_com_poisson() real log_Z_com_poisson_approx(real log_mu, real nu) { real nu_mu = nu * exp(log_mu); real nu2 = nu^2; // first 4 terms of the residual series real log_sum_resid = log1p( nu_mu^(-1) * (nu2 - 1) / 24 + nu_mu^(-2) * (nu2 - 1) / 1152 * (nu2 + 23) + nu_mu^(-3) * (nu2 - 1) / 414720 * (5 * nu2^2 - 298 * nu2 + 11237) ); return nu_mu + log_sum_resid - ((log(2 * pi()) + log_mu) * (nu - 1) / 2 + log(nu) / 2); } // log normalizing constant of the COM Poisson distribution // implementation inspired by code of Ben Goodrich // improved following suggestions of Sebastian Weber (#892) // Args: // log_mu: log location parameter // shape: positive shape parameter real log_Z_com_poisson(real log_mu, real nu) { real log_Z; int k = 2; int M = 10000; int converged = 0; int num_terms = 50; if (nu == 1) { return exp(log_mu); } // nu == 0 or Inf will fail in this parameterization if (nu <= 0) { reject("nu must be positive"); } if (nu == positive_infinity()) { reject("nu must be finite"); } if (log_mu * nu >= log(1.5) && log_mu >= log(1.5)) { return log_Z_com_poisson_approx(log_mu, nu); } // direct computation of the truncated series // check if the Mth term of the series is small enough if (nu * (M * log_mu - lgamma(M + 1)) > -36.0) { reject("nu is too close to zero."); } // first 2 terms of the series log_Z = log1p_exp(nu * log_mu); while (converged == 0) { // adding terms in batches simplifies the AD tape vector[num_terms + 1] log_Z_terms; int i = 1; log_Z_terms[1] = log_Z; while (i <= num_terms) { log_Z_terms[i + 1] = nu * (k * log_mu - lgamma(k + 1)); k += 1; if (log_Z_terms[i + 1] <= -36.0) { converged = 1; break; } i += 1; } log_Z = log_sum_exp(log_Z_terms[1:(i + 1)]); } return log_Z; } // COM Poisson log-PMF for a single response (log parameterization) // Args: // y: the response value // log_mu: log location parameter // shape: positive shape parameter real com_poisson_log_lpmf(int y, real log_mu, real nu) { if (nu == 1) return poisson_log_lpmf(y | log_mu); return nu * (y * log_mu - lgamma(y + 1)) - log_Z_com_poisson(log_mu, nu); } // COM Poisson log-PMF for a single response real com_poisson_lpmf(int y, real mu, real nu) { if (nu == 1) return poisson_lpmf(y | mu); return com_poisson_log_lpmf(y | log(mu), nu); } // COM Poisson log-CDF for a single response real com_poisson_lcdf(int y, real mu, real nu) { real log_mu; real log_Z; // log denominator vector[y] log_num_terms; // terms of the log numerator if (nu == 1) { return poisson_lcdf(y | mu); } // nu == 0 or Inf will fail in this parameterization if (nu <= 0) { reject("nu must be positive"); } if (nu == positive_infinity()) { reject("nu must be finite"); } if (y > 10000) { reject("cannot handle y > 10000"); } log_mu = log(mu); if (nu * (y * log_mu 
- lgamma(y + 1)) <= -36.0) { // y is large enough for the CDF to be very close to 1; return 0; } log_Z = log_Z_com_poisson(log_mu, nu); if (y == 0) { return -log_Z; } // first 2 terms of the series log_num_terms[1] = log1p_exp(nu * log_mu); // remaining terms of the series until y for (k in 2:y) { log_num_terms[k] = nu * (k * log_mu - lgamma(k + 1)); } return log_sum_exp(log_num_terms) - log_Z; } // COM Poisson log-CCDF for a single response real com_poisson_lccdf(int y, real mu, real nu) { return log1m_exp(com_poisson_lcdf(y | mu, nu)); } brms/inst/chunks/fun_inv_gaussian.stan0000644000175000017500000000337314105230573020052 0ustar nileshnilesh /* inverse Gaussian log-PDF for a single response * Args: * y: the response value * mu: positive mean parameter * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real inv_gaussian_lpdf(real y, real mu, real shape) { return 0.5 * log(shape / (2 * pi())) - 1.5 * log(y) - 0.5 * shape * square((y - mu) / (mu * sqrt(y))); } /* vectorized inverse Gaussian log-PDF * Args: * y: response vector * mu: positive mean parameter vector * shape: positive shape parameter * Returns: * a scalar to be added to the log posterior */ real inv_gaussian_vector_lpdf(vector y, vector mu, real shape) { return 0.5 * rows(y) * log(shape / (2 * pi())) - 1.5 * sum(log(y)) - 0.5 * shape * dot_self((y - mu) ./ (mu .* sqrt(y))); } /* inverse Gaussian log-CDF for a single quantile * Args: * y: a quantile * mu: positive mean parameter * shape: positive shape parameter * Returns: * log(P(Y <= y)) */ real inv_gaussian_lcdf(real y, real mu, real shape) { return log(Phi(sqrt(shape) / sqrt(y) * (y / mu - 1)) + exp(2 * shape / mu) * Phi(-sqrt(shape) / sqrt(y) * (y / mu + 1))); } /* inverse Gaussian log-CCDF for a single quantile * Args: * y: a quantile * mu: positive mean parameter * shape: positive shape parameter * Returns: * log(P(Y > y)) */ real inv_gaussian_lccdf(real y, real mu, real shape) { return log1m(Phi(sqrt(shape) / sqrt(y) * (y / mu - 1)) - exp(2 * shape / mu) * Phi(-sqrt(shape) / sqrt(y) * (y / mu + 1))); } brms/inst/chunks/fun_r2d2.stan0000644000175000017500000000055114105230573016130 0ustar nileshnilesh /* Efficient computation of the R2D2 prior * Args: * z: standardized population-level coefficients * phi: local weight parameters * tau2: global scale parameter * Returns: * population-level coefficients following the R2D2 prior */ vector R2D2(vector z, vector phi, real tau2) { return z .* sqrt(phi * tau2); } brms/inst/doc/0000755000175000017500000000000014146747050013101 5ustar nileshnileshbrms/inst/doc/brms_overview.ltx0000644000175000017500000017607013701270370016526 0ustar nileshnilesh\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Overview of the brms Package} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{\pkg{brms}: An \proglang{R} Package for Bayesian Multilevel Models using \pkg{Stan}} %% for pretty printing and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{brms: An R Package for Bayesian Multilevel Models using Stan} %% without formatting \Shorttitle{\pkg{brms}: Bayesian Multilevel Models using Stan} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package implements Bayesian multilevel models in \proglang{R} using the probabilistic programming language 
\pkg{Stan}. A wide range of distributions and link functions are supported, allowing users to fit -- among others -- linear, robust linear, binomial, Poisson, survival, response times, ordinal, quantile, zero-inflated, hurdle, and even non-linear models all in a multilevel context. Further modeling options include autocorrelation of the response variable, user defined covariance structures, censored data, as well as meta-analytic standard errors. Prior specifications are flexible and explicitly encourage users to apply prior distributions that actually reflect their beliefs. In addition, model fit can easily be assessed and compared using posterior-predictive checks and leave-one-out cross-validation. If you use \pkg{brms}, please cite this article as published in the Journal of Statistical Software \citep{brms1}. } \Keywords{Bayesian inference, multilevel model, ordinal data, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel model, ordinal data, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer a great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow the modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for \proglang{R} \citep{Rcore2015} have been developed to fit MLMs. Possibly the most widely known package in this area is \pkg{lme4} \citep{bates2015}, which uses maximum likelihood or restricted maximum likelihood methods for model fitting. Although alternative Bayesian methods have several advantages over frequentist approaches (e.g., the possibility of explicitly incorporating prior knowledge about parameters into the model), their practical use was limited for a long time because the posterior distributions of more complex models (such as MLMs) could not be found analytically. Markov chain Monte Carlo (MCMC) algorithms allowing to draw random samples from the posterior were not available or too time-consuming. In the last few decades, however, this has changed with the development of new algorithms and the rapid increase of general computing power. 
Today, several software packages implement these techniques, for instance \pkg{WinBugs} \citep{lunn2000, spiegelhalter2003}, \pkg{OpenBugs} \citep{spiegelhalter2007}, \pkg{JAGS} \citep{plummer2013}, \pkg{MCMCglmm} \citep{hadfield2010} and \pkg{Stan} \citep{stan2017, carpenter2017} to mention only a few. With the exception of the latter, all of these programs are primarily using combinations of Metropolis-Hastings updates \citep{metropolis1953,hastings1970} and Gibbs-sampling \citep{geman1984,gelfand1990}, sometimes also coupled with slice-sampling \citep{damien1999,neal2003}. One of the main problems of these algorithms is their rather slow convergence for high-dimensional models with correlated parameters \citep{neal2011,hoffman2014,gelman2014}. Furthermore, Gibbs-sampling requires priors to be conjugate to the likelihood of parameters in order to work efficiently \citep{gelman2014}, thus reducing the freedom of the researcher in choosing a prior that reflects his or her beliefs. In contrast, \pkg{Stan} implements Hamiltonian Monte Carlo \citep{duane1987, neal2011} and its extension, the No-U-Turn Sampler (NUTS) \citep{hoffman2014}. These algorithms converge much more quickly especially for high-dimensional models regardless of whether the priors are conjugate or not \citep{hoffman2014}. Similar to software packages like \pkg{WinBugs}, \pkg{Stan} comes with its own programming language, allowing for great modeling flexibility (cf., \citeauthor{stanM2017} \citeyear{stanM2017}; \citeauthor{carpenter2017} \citeyear{carpenter2017}). Many researchers may still hesitate to use \pkg{Stan} directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error prone process even for researchers familiar with Bayesian inference. The package \pkg{brms}, presented in this paper, aims at closing this gap (at least for MLMs) allowing the user to benefit from the merits of \pkg{Stan} only by using simple, \pkg{lme4}-like formula syntax. \pkg{brms} supports a wide range of distributions and link functions, allows for multiple grouping factors each with multiple group-level effects, autocorrelation of the response variable, user defined covariance structures, as well as flexible and explicit prior specifications. The purpose of the present article is to provide a general overview of the \pkg{brms} package (version 0.10.0). We begin by explaining the underlying structure of MLMs. Next, the software is introduced in detail using recurrence times of infection in kidney patients \citep{mcgilchrist1991} and ratings of inhaler instructions \citep{ezzet1991} as examples. We end by comparing \pkg{brms} to other \proglang{R} packages implementing MLMs and describe future plans for extending the package. \section{Model description} \label{model} The core of every MLM is the prediction of the response $y$ through the linear combination $\eta$ of predictors transformed by the inverse link function $f$ assuming a certain distribution $D$ for $y$. We write $$y_i \sim D(f(\eta_i), \theta)$$ to stress the dependency on the $i\textsuperscript{th}$ data point. In many \proglang{R} packages, $D$ is also called the `family' and we will use this term in the following. The parameter $\theta$ describes additional family specific parameters that typically do not vary across data points, such as the standard deviation $\sigma$ in normal models or the shape $\alpha$ in Gamma or negative binomial models. 
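As a concrete instance of this notation, a Poisson model combined with the log link takes the form
$$y_i \sim \mathrm{Poisson}(\exp(\eta_i))$$
without any additional parameter $\theta$, whereas a linear model with identity link can be written as $y_i \sim N(\eta_i, \sigma)$ with $\theta = \sigma$.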
The linear predictor can generally be written as $$\eta = \mathbf{X} \beta + \mathbf{Z} u$$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The response $y$ as well as $\mathbf{X}$ and $\mathbf{Z}$ make up the data, whereas $\beta$, $u$, and $\theta$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects. However, we avoid these terms in the present paper following the recommendations of \cite{gelmanMLM2006}, as they are not used unambiguously in the literature. Also, we want to make explicit that $u$ is a model parameter in the same manner as $\beta$ so that uncertainty in its estimates can be naturally evaluated. In fact, this is an important advantage of Bayesian MCMC methods as compared to maximum likelihood approaches, which do not treat $u$ as a parameter, but assume that it is part of the error term instead (cf., \citeauthor{fox2011}, \citeyear{fox2011}). Except for linear models, we do not incorporate an additional error term for every observation by default. If desired, such an error term can always be modeled using a grouping factor with as many levels as observations in the data. \subsection{Prior distributions} \subsubsection{Regression parameters at population-level} In \pkg{brms}, population-level parameters are not restricted to have normal priors. Instead, every parameter can have every one-dimensional prior implemented in \pkg{Stan}, for instance uniform, Cauchy or even Gamma priors. As a negative side effect of this flexibility, correlations between them cannot be modeled as parameters. If desired, point estimates of the correlations can be obtained after sampling has been done. By default, population level parameters have an improper flat prior over the reals. \subsubsection{Regression parameters at group-level} The group-level parameters $u$ are assumed to come from a multivariate normal distribution with mean zero and unknown covariance matrix $\mathbf{\Sigma}$: $$u \sim N(0, \mathbf{\Sigma})$$ As is generally the case, covariances between group-level parameters of different grouping factors are assumed to be zero. This implies that $\mathbf{Z}$ and $u$ can be split up into several matrices $\mathbf{Z_k}$ and parameter vectors $u_k$, where $k$ indexes grouping factors, so that the model can be simplified to $$u_k \sim N(0, \mathbf{\Sigma_k})$$ Usually, but not always, we can also assume group-level parameters associated with different levels (indexed by $j$) of the same grouping factor to be independent leading to $$u_{kj} \sim N(0, \mathbf{V_k})$$ The covariance matrices $\mathbf{V_k}$ are modeled as parameters. In most packages, an Inverse-Wishart distribution is used as a prior for $\mathbf{V_k}$. This is mostly because its conjugacy leads to good properties of Gibbs-Samplers \citep{gelman2014}. However, there are good arguments against the Inverse-Wishart prior \citep{natarajan2000, kass2006}. The NUTS-Sampler implemented in \pkg{Stan} does not require priors to be conjugate. This advantage is utilized in \pkg{brms}: $\mathbf{V_k}$ is parameterized in terms of a correlation matrix $\mathbf{\Omega_k}$ and a vector of standard deviations $\sigma_k$ through $$\mathbf{V_k} = \mathbf{D}(\sigma_k) \mathbf{\Omega_k} \mathbf{D}(\sigma_k)$$ where $\mathbf{D}(\sigma_k)$ denotes the diagonal matrix with diagonal elements $\sigma_k$. 
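For instance, for two correlated group-level coefficients with standard deviations $\sigma_{k1}$ and $\sigma_{k2}$ and correlation $\omega_k$, this decomposition amounts to
$$\mathbf{V_k} =
\left(\begin{array}{cc} \sigma_{k1} & 0 \\ 0 & \sigma_{k2} \end{array}\right)
\left(\begin{array}{cc} 1 & \omega_k \\ \omega_k & 1 \end{array}\right)
\left(\begin{array}{cc} \sigma_{k1} & 0 \\ 0 & \sigma_{k2} \end{array}\right)
=
\left(\begin{array}{cc} \sigma_{k1}^2 & \omega_k \sigma_{k1} \sigma_{k2} \\ \omega_k \sigma_{k1} \sigma_{k2} & \sigma_{k2}^2 \end{array}\right).$$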
Priors are then specified for the parameters on the right hand side of the equation. For $\mathbf{\Omega_k}$, we use the LKJ-Correlation prior with parameter $\zeta > 0$ by \cite{lewandowski2009}\footnote{Internally, the Cholesky factor of the correlation matrix is used, as it is more efficient and numerically stable.}: $$\mathbf{\Omega_k} \sim \mathrm{LKJ}(\zeta)$$ The expected value of the LKJ-prior is the identity matrix (implying correlations of zero) for any positive value of $\zeta$, which can be interpreted like the shape parameter of a symmetric beta distribution \citep{stanM2017}. If $\zeta = 1$ (the default in \pkg{brms}) the density is uniform over correlation matrices of the respective dimension. If $\zeta > 1$, the identity matrix is the mode of the prior, with a sharper peak in the density for larger values of $\zeta$. If $0 < \zeta < 1$ the prior is U-shaped having a trough at the identity matrix, which leads to higher probabilities for non-zero correlations. For every element of $\sigma_k$, any prior can be applied that is defined on the non-negative reals only. As default in \pkg{brms}, we use a half Student-t prior with 3 degrees of freedom. This prior often leads to better convergence of the models than a half Cauchy prior, while still being relatively weakly informative. Sometimes -- for instance when modeling pedigrees -- different levels of the same grouping factor cannot be assumed to be independent. In this case, the covariance matrix of $u_k$ becomes $$\mathbf{\Sigma_k} = \mathbf{V_k} \otimes \mathbf{A_k}$$ where $\mathbf{A_k}$ is the known covariance matrix between levels and $\otimes$ is the Kronecker product. \subsubsection{Family specific parameters} For some families, additional parameters need to be estimated. In the current section, we only name the most important ones. Normal and Student's distributions need the parameter $\sigma$ to account for residual error variance. By default, $\sigma$ has a half Cauchy prior with a scale parameter that depends on the standard deviation of the response variable to remain only weakly informative regardless of response variable's scaling. Furthermore, Student's distributions needs the parameter $\nu$ representing the degrees of freedom. By default, $\nu$ has a wide gamma prior as proposed by \cite{juarez2010}. Gamma, Weibull, and negative binomial distributions need the shape parameter $\alpha$ that also has a wide gamma prior by default. \section{Parameter estimation} The \pkg{brms} package does not fit models itself but uses \pkg{Stan} on the back-end. Accordingly, all samplers implemented in \pkg{Stan} can be used to fit \pkg{brms} models. Currently, these are the static Hamiltonian Monte-Carlo (HMC) Sampler sometimes also referred to as Hybrid Monte-Carlo \citep{neal2011, neal2003, duane1987} and its extension the No-U-Turn Sampler (NUTS) by \cite{hoffman2014}. HMC-like algorithms produce samples that are much less autocorrelated than those of other samplers such as the random-walk Metropolis algorithm \citep{hoffman2014, Creutz1988}. The main drawback of this increased efficiency is the need to calculate the gradient of the log-posterior, which can be automated using algorithmic differentiation \citep{griewank2008} but is still a time-consuming process for more complex models. Thus, using HMC leads to higher quality samples but takes more time per sample than other algorithms typically applied. 
Another drawback of HMC is the need to pre-specify at least two parameters, which are both critical for the performance of HMC. The NUTS Sampler allows setting these parameters automatically thus eliminating the need for any hand-tuning, while still being at least as efficient as a well tuned HMC \citep{hoffman2014}. For more details on the sampling algorithms applied in \pkg{Stan}, see the \pkg{Stan} user's manual \citep{stanM2017} as well as \cite{hoffman2014}. In addition to the estimation of model parameters, \pkg{brms} allows drawing samples from the posterior predictive distribution as well as from the pointwise log-likelihood. Both can be used to assess model fit. The former allows a comparison between the actual response $y$ and the response $\hat{y}$ predicted by the model. The pointwise log-likelihood can be used, among others, to calculate the widely applicable information criterion (WAIC) proposed by \cite{watanabe2010} and leave-one-out cross-validation (LOO; \citealp{gelfand1992}; \citealp{vehtari2015}; see also \citealp{ionides2008}) both allowing to compare different models applied to the same data (lower WAICs and LOOs indicate better model fit). The WAIC can be viewed as an improvement of the popular deviance information criterion (DIC), which has been criticized by several authors (\citealp{vehtari2015}; \citealp{plummer2008}; \citealp{vanderlinde2005}; see also the discussion at the end of the original DIC paper by \citealp{spiegelhalter2002}) in part because of problems arising from fact that the DIC is only a point estimate. In \pkg{brms}, WAIC and LOO are implemented using the \pkg{loo} package \citep{loo2016} also following the recommendations of \cite{vehtari2015}. \section{Software} \label{software} The \pkg{brms} package provides functions for fitting MLMs using \pkg{Stan} for full Bayesian inference. To install the latest release version of \pkg{brms} from CRAN, type \code{install.packages("brms")} within \proglang{R}. The current developmental version can be downloaded from GitHub via \begin{Sinput} devtools::install_github("paul-buerkner/brms") \end{Sinput} Additionally, a \proglang{C++} compiler is required. This is because \pkg{brms} internally creates \pkg{Stan} code, which is translated to \proglang{C++} and compiled afterwards. The program \pkg{Rtools} \citep{Rtools2015} comes with a \proglang{C++} compiler for Windows\footnote{During the installation process, there is an option to change the system \code{PATH}. Please make sure to check this options, because otherwise \pkg{Rtools} will not be available within \proglang{R}.}. On OS X, one should use \pkg{Xcode} \citep{Xcode2015} from the App Store. To check whether the compiler can be called within \proglang{R}, run \code{system("g++ -v")} when using \pkg{Rtools} or \code{system("clang++ -v")} when using \pkg{Xcode}. If no warning occurs and a few lines of difficult to read system code are printed out, the compiler should work correctly. For more detailed instructions on how to get the compilers running, see the prerequisites section on \url{https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started}. Models are fitted in \pkg{brms} using the following procedure, which is also summarized in Figure~\ref{flowchart}. First, the user specifies the model using the \code{brm} function in a way typical for most model fitting \proglang{R} functions, that is by defining \code{formula}, \code{data}, and \code{family}, as well as some other optional arguments. 
Second, this information is processed and the \code{make_stancode} and \code{make_standata} functions are called. The former generates the model code in \pkg{Stan} language and the latter prepares the data for use in \pkg{Stan}. These two are the mandatory parts of every \pkg{Stan} model and without \pkg{brms}, users would have to specify them themselves. Third, \pkg{Stan} code and data as well as additional arguments (such as the number of iterations and chains) are passed to functions of the \pkg{rstan} package (the \proglang{R} interface of \pkg{Stan}; \citeauthor{stan2017}, \citeyear{stan2017}). Fourth, the model is fitted by \pkg{Stan} after translating and compiling it in \proglang{C++}. Fifth, after the model has been fitted and returned by \pkg{rstan}, the fitted model object is post-processed in \pkg{brms} among others by renaming the model parameters to be understood by the user. Sixth, the results can be investigated in \proglang{R} using various methods such as \code{summary}, \code{plot}, or \code{predict} (for a complete list of methods type \code{methods(class = "brmsfit")}). \begin{figure}[ht] \centering \includegraphics[height = 0.4\textheight, keepaspectratio]{flowchart.pdf} \caption{High level description of the model fitting procedure used in \pkg{brms}.} \label{flowchart} \end{figure} \subsection{A worked example} In the following, we use an example about the recurrence time of an infection in kidney patients initially published by \cite{mcgilchrist1991}. The data set consists of 76 entries of 7 variables: \begin{Sinput} R> library("brms") R> data("kidney") R> head(kidney, n = 3) \end{Sinput} \begin{Soutput} time censored patient recur age sex disease 1 8 0 1 1 28 male other 2 23 0 2 1 48 female GN 3 22 0 3 1 32 male other \end{Soutput} Variable \code{time} represents the recurrence time of the infection, \code{censored} indicates if \code{time} is right censored (\code{1}) or not censored (\code{0}), variable \code{patient} is the patient id, and \code{recur} indicates if it is the first or second recurrence in that patient. Finally, variables \code{age}, \code{sex}, and \code{disease} make up the predictors. \subsection[Fitting models with brms]{Fitting models with \pkg{brms}} The core of the \pkg{brms} package is the \code{brm} function and we will explain its argument structure using the example above. Suppose we want to predict the (possibly censored) recurrence time using a log-normal model, in which the intercept as well as the effect of \code{age} is nested within patients. Then, we may use the following code: \begin{Sinput} fit1 <- brm(formula = time | cens(censored) ~ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal(), prior = c(set_prior("normal(0,5)", class = "b"), set_prior("cauchy(0,2)", class = "sd"), set_prior("lkj(2)", class = "cor")), warmup = 1000, iter = 2000, chains = 4, control = list(adapt_delta = 0.95)) \end{Sinput} \subsection[formula: Information on the response and predictors]{\code{formula}: Information on the response and predictors} Without doubt, \code{formula} is the most complicated argument, as it contains information on the response variable as well as on predictors at different levels of the model. Everything before the $\sim$ sign relates to the response part of \code{formula}. In the usual and most simple case, this is just one variable name (e.g., \code{time}). However, to incorporate additional information about the response, one can add one or more terms of the form \code{| fun(variable)}. 
\code{fun} may be one of a few functions defined internally in \pkg{brms} and \code{variable} corresponds to a variable in the data set supplied by the user. In this example, \code{cens} makes up the internal function that handles censored data, and \code{censored} is the variable that contains information on the censoring. Other available functions in this context are \code{weights} and \code{disp} to allow different sorts of weighting, \code{se} to specify known standard errors primarily for meta-analysis, \code{trunc} to define truncation boundaries, \code{trials} for binomial models\footnote{In functions such as \code{glm} or \code{glmer}, the binomial response is typically passed as \code{cbind(success, failure)}. In \pkg{brms}, the equivalent syntax is \code{success | trials(success + failure)}.}, and \code{cat} to specify the number of categories for ordinal models. Everything on the right side of $\sim$ specifies predictors. Here, the syntax exactly matches that of \pkg{lme4}. For both, population-level and group-level terms, the \code{+} is used to separate different effects from each other. Group-level terms are of the form \code{(coefs | group)}, where \code{coefs} contains one or more variables whose effects are assumed to vary with the levels of the grouping factor given in \code{group}. Multiple grouping factors each with multiple group-level coefficients are possible. In the present example, only one group-level term is specified in which \code{1 + age} are the coefficients varying with the grouping factor \code{patient}. This implies that the intercept of the model as well as the effect of age is supposed to vary between patients. By default, group-level coefficients within a grouping factor are assumed to be correlated. Correlations can be set to zero by using the \code{(coefs || group)} syntax\footnote{In contrast to \pkg{lme4}, the \code{||} operator in \pkg{brms} splits up the design matrix computed from \code{coefs} instead of decomposing \code{coefs} in its terms. This implies that columns of the design matrix originating from the same factor are also assumed to be uncorrelated, whereas \pkg{lme4} estimates the correlations in this case. For a way to achieve \pkg{brms}-like behavior with \pkg{lme4}, see the \code{mixed} function of the \pkg{afex} package by \cite{afex2015}.}. Everything on the right side of \code{formula} that is not recognized as part of a group-level term is treated as a population-level effect. In this example, the population-level effects are \code{age}, \code{sex}, and \code{disease}. \subsection[family: Distribution of the response variable]{\code{family}: Distribution of the response variable} Argument \code{family} should usually be a family function, a call to a family function or a character string naming the family. If not otherwise specified, default link functions are applied. \pkg{brms} comes with a large variety of families. Linear and robust linear regression can be performed using the \code{gaussian} or \code{student} family combined with the \code{identity} link. For dichotomous and categorical data, families \code{bernoulli}, \code{binomial}, and \code{categorical} combined with the \code{logit} link, by default, are perfectly suited. Families \code{poisson}, \code{negbinomial}, and \code{geometric} allow for modeling count data. Families \code{lognormal}, \code{Gamma}, \code{exponential}, and \code{weibull} can be used (among others) for survival regression. 
Ordinal regression can be performed using the families \code{cumulative}, \code{cratio}, \code{sratio}, and \code{acat}. Finally, families \code{zero_inflated_poisson}, \code{zero_inflated_negbinomial}, \code{zero_inflated_binomial}, \code{zero_inflated_beta}, \code{hurdle_poisson}, \code{hurdle_negbinomial}, and \code{hurdle_gamma} can be used to adequately model excess zeros in the response. In our example, we use \code{family = lognormal()} implying a log-normal ``survival'' model for the response variable \code{time}. \subsection[prior: Prior distributions of model parameters]{\code{prior}: Prior distributions of model parameters} Every population-level effect has its corresponding regression parameter. These parameters are named \code{b\_<coef>}, where \code{<coef>} represents the name of the corresponding population-level effect. The default prior is an improper flat prior over the reals. Suppose, for instance, that we want to set a normal prior with mean \code{0} and standard deviation \code{10} on the effect of \code{age} and a Cauchy prior with location \code{1} and scale \code{2} on \code{sexfemale}\footnote{When factors are used as predictors, parameter names will depend on the factor levels. To get an overview of all parameters and parameter classes for which priors can be specified, use function \code{get\_prior}. For the present example, \code{get\_prior(time | cens(censored) $\sim$ age * sex + disease + (1 + age|patient), data = kidney, family = lognormal())} provides the desired overview.}. Then, we may write \begin{Sinput} prior <- c(set_prior("normal(0,10)", class = "b", coef = "age"), set_prior("cauchy(1,2)", class = "b", coef = "sexfemale")) \end{Sinput} To put the same prior (e.g., a normal prior) on all population-level effects at once, we may write as a shortcut \code{set_prior("normal(0,10)", class = "b")}. This also leads to faster sampling, because priors can be vectorized in this case. Note that we could also omit the \code{class} argument for population-level effects, as it is the default class in \code{set_prior}. A special shrinkage prior that can be applied to population-level effects is the horseshoe prior \citep{carvalho2009, carvalho2010}. It is symmetric around zero with fat tails and an infinitely large spike at zero. This makes it ideal for sparse models that have many regression coefficients of which only a minority are non-zero. The horseshoe prior can be applied to all population-level effects at once (excluding the intercept) by using \code{set_prior("horseshoe(1)")}. The $1$ implies that the Student-$t$ prior of the local shrinkage parameters has $1$ degree of freedom. In \pkg{brms}, it is possible to increase the degrees of freedom (which will often improve convergence), although the prior no longer resembles a horseshoe in this case\footnote{This class of priors is often referred to as the hierarchical shrinkage family, which contains the original horseshoe prior as a special case.}. For more details see \cite{carvalho2009, carvalho2010}. Each group-level effect of each grouping factor has a standard deviation parameter, which is restricted to be non-negative and, by default, has a half Student-$t$ prior with $3$ degrees of freedom and a scale parameter that is at least $10$. For non-ordinal models, \pkg{brms} tries to evaluate whether the scale is large enough to be considered only weakly informative for the model at hand by comparing it with the standard deviation of the response after applying the link function.
If this is not the case, it will increase the scale based on the aforementioned standard deviation\footnote{Changing priors based on the data is not truly Bayesian and might rightly be criticized. However, it helps to avoid the problem of overly informative default priors without always forcing users to define their own priors. The latter would also be problematic, as not all users can be expected to be well-educated Bayesians, and reasonable default priors will help them a lot in using Bayesian methods.}. \pkg{Stan} implicitly defines a half Student-$t$ prior by using a Student-$t$ prior on a restricted parameter \citep{stanM2017}. For other reasonable priors on standard deviations see \cite{gelman2006}. In \pkg{brms}, standard deviation parameters are named \code{sd\_<group>\_<coef>}, so that \code{sd\_patient\_Intercept} and \code{sd\_patient\_age} are the parameter names in the example. If desired, it is possible to set a different prior on each parameter, but statements such as \code{set_prior("student_t(3,0,5)", class = "sd", group = "patient")} or even \code{set_prior("student_t(3,0,5)", class = "sd")} may also be used and are again faster because of vectorization. If there is more than one group-level effect per grouping factor, correlations between group-level effects are estimated. As mentioned in Section~\ref{model}, the LKJ-Correlation prior with parameter $\zeta > 0$ \citep{lewandowski2009} is used for this purpose. In \pkg{brms}, this prior is abbreviated as \code{"lkj(zeta)"} and correlation matrix parameters are named \code{cor\_<group>} (e.g., \code{cor_patient}), so that \code{set_prior("lkj(2)", class = "cor", group = "patient")} is a valid statement. To set the same prior on every correlation matrix in the model, \code{set_prior("lkj(2)", class = "cor")} is also allowed, but does not come with any efficiency increases. Other model parameters such as the residual standard deviation \code{sigma} in normal models or the \code{shape} in Gamma models have their priors defined in the same way, where each of them is treated as having its own parameter class. A complete overview of possible prior distributions is given in the \pkg{Stan} user's manual \citep{stanM2017}. Note that \pkg{brms} does not thoroughly check whether the priors are written in correct \pkg{Stan} language. Instead, \pkg{Stan} will check their syntactical correctness when the model is translated to \proglang{C++} and return an error if they are not. This, however, does not imply that priors are always meaningful if they are accepted by \pkg{Stan}. Although \pkg{brms} tries to find common problems (e.g., setting bounded priors on unbounded parameters), there is no guarantee that the defined priors are reasonable for the model. \subsection[control: Adjusting the sampling behavior of Stan]{\code{control}: Adjusting the sampling behavior of \pkg{Stan}} In addition to choosing the number of iterations, warmup samples, and chains, users can control the behavior of the NUTS sampler by using the \code{control} argument. The most important reason to use \code{control} is to decrease (or, at best, eliminate) the number of divergent transitions that cause a bias in the obtained posterior samples. Whenever you see the warning \code{"There were x divergent transitions after warmup."}, you should seriously consider increasing \code{adapt_delta}. To do this, write \code{control = list(adapt_delta = <x>)}, where \code{<x>} should usually be a value between \code{0.8} (the current default) and \code{1}.
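For example, a fitted model such as \code{fit1} could be refitted with a larger value of \code{adapt_delta} via the \code{update} method, without writing out the full \code{brm} call again (a sketch; the object name \code{fit1_delta} is only illustrative):
\begin{Sinput}
R> fit1_delta <- update(fit1, control = list(adapt_delta = 0.99))
\end{Sinput}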
Increasing \code{adapt_delta} will slow down the sampler but will decrease the number of divergent transitions threatening the validity of your posterior samples. Another problem arises when the depth of the tree being evaluated in each iteration is exceeded. This is less common than having divergent transitions, but may also bias the posterior samples. When it happens, \pkg{Stan} will throw out a warning suggesting to increase \code{max_treedepth}, which can be accomplished by writing \code{control = list(max_treedepth = )} with a positive integer \code{} that should usually be larger than the current default of \code{10}. \subsection{Analyzing the results} The example model \code{fit1} is fitted using 4 chains, each with 2000 iterations of which the first 1000 are warmup to calibrate the sampler, leading to a total of 4000 posterior samples\footnote{To save time, chains may also run in parallel when using argument \code{cluster}.}. For researchers familiar with Gibbs or Metropolis-Hastings sampling, this number may seem far too small to achieve good convergence and reasonable results, especially for multilevel models. However, as \pkg{brms} utilizes the NUTS sampler \citep{hoffman2014} implemented in \pkg{Stan}, even complex models can often be fitted with not more than a few thousand samples. Of course, every iteration is more computationally intensive and time-consuming than the iterations of other algorithms, but the quality of the samples (i.e., the effective sample size per iteration) is usually higher. After the posterior samples have been computed, the \code{brm} function returns an \proglang{R} object, containing (among others) the fully commented model code in \pkg{Stan} language, the data to fit the model, and the posterior samples themselves. The model code and data for the present example can be extracted through \code{stancode(fit1)} and \code{standata(fit1)} respectively\footnote{Both model code and data may be amended and used to fit new models. That way, \pkg{brms} can also serve as a good starting point in building more complicated models in \pkg{Stan}, directly.}. A model summary is readily available using \begin{Sinput} R> summary(fit1, waic = TRUE) \end{Sinput} \begin{Soutput} Family: lognormal (identity) Formula: time | cens(censored) ~ age * sex + disease + (1 + age | patient) Data: kidney (Number of observations: 76) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 673.51 Group-Level Effects: ~patient (Number of levels: 38) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.40 0.28 0.01 1.01 1731 1 sd(age) 0.01 0.01 0.00 0.02 1137 1 cor(Intercept,age) -0.13 0.46 -0.88 0.76 3159 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 2.73 0.96 0.82 4.68 2139 1 age 0.01 0.02 -0.03 0.06 1614 1 sexfemale 2.42 1.13 0.15 4.64 2065 1 diseaseGN -0.40 0.53 -1.45 0.64 2664 1 diseaseAN -0.52 0.50 -1.48 0.48 2713 1 diseasePKD 0.60 0.74 -0.86 2.02 2968 1 age:sexfemale -0.02 0.03 -0.07 0.03 1956 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.15 0.13 0.91 1.44 4000 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). 
\end{Soutput} On the top of the output, some general information on the model is given, such as family, formula, number of iterations and chains, as well as the WAIC. Next, group-level effects are displayed separately for each grouping factor in terms of standard deviations and correlations between group-level effects. On the bottom of the output, population-level effects are displayed. If incorporated, autocorrelation and family specific parameters (e.g., the residual standard deviation \code{sigma}) are also given. In general, every parameter is summarized using the mean (\code{Estimate}) and the standard deviation (\code{Est.Error}) of the posterior distribution as well as two-sided 95\% Credible intervals (\code{l-95\% CI} and \code{u-95\% CI}) based on quantiles. The \code{Eff.Sample} value is an estimation of the effective sample size; that is the number of independent samples from the posterior distribution that would be expected to yield the same standard error of the posterior mean as is obtained from the dependent samples returned by the MCMC algorithm. The \code{Rhat} value provides information on the convergence of the algorithm (cf., \citeauthor{gelman1992}, \citeyear{gelman1992}). If \code{Rhat} is considerably greater than 1 (i.e., $> 1.1$), the chains have not yet converged and it is necessary to run more iterations and/or set stronger priors. To visually investigate the chains as well as the posterior distribution, the \code{plot} method can be used (see Figure~\ref{kidney_plot}). An even more detailed investigation can be achieved by applying the \pkg{shinystan} package \citep{gabry2015} through method \code{launch_shiny}. With respect to the above summary, \code{sexfemale} seems to be the only population-level effect with considerable influence on the response. Because the mean of \code{sexfemale} is positive, the model predicts longer periods without an infection for females than for males. Effects of population-level predictors can also be visualized with the \code{conditional_effects} method (see Figure~\ref{kidney_conditional_effects}). \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{kidney_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the kidney model discussed in Section~\ref{software}.} \label{kidney_plot} \end{figure} \begin{figure}[ht] \centering \includegraphics[height=0.90\textheight]{kidney_conditional_effects.pdf} \caption{Conditional effects plots of all population-level predictors of the kidney model discussed in Section~\ref{software}.} \label{kidney_conditional_effects} \end{figure} Looking at the group-level effects, the standard deviation parameter of \code{age} is suspiciously small. To test whether it is smaller than the standard deviation parameter of \code{Intercept}, we apply the \code{hypothesis} method: \begin{Sinput} R> hypothesis(fit1, "Intercept - age > 0", class = "sd", group = "patient") \end{Sinput} \begin{Soutput} Hypothesis Tests for class sd_patient: Estimate Est.Error l-95% CI u-95% CI Evid.Ratio Intercept-age > 0 0.39 0.27 0.03 Inf 67.97 * --- '*': The expected value under the hypothesis lies outside the 95% CI. \end{Soutput} The one-sided 95\% credibility interval does not contain zero, thus indicating that the standard deviations differ from each other in the expected direction. 
In accordance with this finding, the \code{Evid.Ratio} shows that the hypothesis being tested (i.e., \code{Intercept - age > 0}) is about $68$ times more likely than the alternative hypothesis \code{Intercept - age < 0}. It is important to note that this kind of comparison is not easily possible when applying frequentist methods, because in this case only point estimates are available for group-level standard deviations and correlations. When looking at the correlation between both group-level effects, its distribution displayed in Figure~\ref{kidney_plot} and the 95\% credibility interval in the summary output appear to be rather wide. This indicates that there is not enough evidence in the data to reasonably estimate the correlation. Together, the small standard deviation of \code{age} and the uncertainty in the correlation raise the question if \code{age} should be modeled as a group specific term at all. To answer this question, we fit another model without this term: \begin{Sinput} R> fit2 <- update(fit1, formula. = ~ . - (1 + age|patient) + (1|patient)) \end{Sinput} A good way to compare both models is leave-one-out cross-validation (LOO)\footnote{The WAIC is an approximation of LOO that is faster and easier to compute. However, according to \cite{vehtari2015}, LOO may be the preferred method to perform model comparisons.}, which can be called in \pkg{brms} using \begin{Sinput} R> LOO(fit1, fit2) \end{Sinput} \begin{Soutput} LOOIC SE fit1 675.45 45.18 fit2 674.17 45.06 fit1 - fit2 1.28 0.99 \end{Soutput} In the output, the LOO information criterion for each model as well as the difference of the LOOs each with its corresponding standard error is shown. Both LOO and WAIC are approximately normal if the number of observations is large so that the standard errors can be very helpful in evaluating differences in the information criteria. However, for small sample sizes, standard errors should be interpreted with care \citep{vehtari2015}. For the present example, it is immediately evident that both models have very similar fit, indicating that there is little benefit in adding group specific coefficients for \code{age}. \subsection{Modeling ordinal data} In the following, we want to briefly discuss a second example to demonstrate the capabilities of \pkg{brms} in handling ordinal data. \cite{ezzet1991} analyze data from a two-treatment, two-period crossover trial to compare 2 inhalation devices for delivering the drug salbutamol in 286 asthma patients. Patients were asked to rate the clarity of leaflet instructions accompanying each device, using a four-point ordinal scale. Ratings are predicted by \code{treat} to indicate which of the two inhaler devices was used, \code{period} to indicate the time of administration, and \code{carry} to model possible carry over effects. \begin{Sinput} R> data("inhaler") R> head(inhaler, n = 1) \end{Sinput} \begin{Soutput} subject rating treat period carry 1 1 1 0.5 0.5 0 \end{Soutput} Typically, the ordinal response is assumed to originate from the categorization of a latent continuous variable. That is there are $K$ latent thresholds (model intercepts), which partition the continuous scale into the $K + 1$ observable, ordered categories. Following this approach leads to the cumulative or graded-response model \citep{samejima1969} for ordinal data implemented in many \proglang{R} packages. In \pkg{brms}, it is available via family \code{cumulative}. 
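More formally, let $\eta_n$ denote the linear predictor of observation $n$ and let $\tau_1 < \ldots < \tau_K$ be the ordered thresholds. Assuming the default \code{logit} link, the cumulative model can be sketched as $$\mathrm{P}(y_n \leq k) = \mathrm{logit}^{-1}(\tau_k - \eta_n),$$ so that the probability of observing category $k$ is the difference of two adjacent cumulative probabilities.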
Fitting the cumulative model to the inhaler data, also incorporating an intercept varying by subjects, may look like this: \begin{Sinput} fit3 <- brm(formula = rating ~ treat + period + carry + (1|subject), data = inhaler, family = cumulative) \end{Sinput} While the support for ordinal data in most \proglang{R} packages ends here\footnote{Exceptions known to us are the packages \pkg{ordinal} \citep{christensen2015} and \pkg{VGAM} \citep{yee2010}. The former supports only cumulative models but with different modeling options for the thresholds. The latter supports all four ordinal families also implemented in \pkg{brms} as well as category specific effects but no group-specific effects.}, \pkg{brms} allows changes to this basic model in at least three ways. First of all, three additional ordinal families are implemented. Families \code{sratio} (stopping ratio) and \code{cratio} (continuation ratio) are so-called sequential models \citep{tutz1990}. Both are equivalent to each other for symmetric link functions such as \code{logit}, but will differ for asymmetric ones such as \code{cloglog}. The fourth ordinal family is \code{acat} (adjacent category), also known as the partial credit model \citep{masters1982, andrich1978a}. Second, restrictions to the thresholds can be applied. By default, thresholds are ordered for family \code{cumulative} or are completely free to vary for the other families. This is indicated by argument \code{threshold = "flexible"} (default) in \code{brm}. Using \code{threshold = "equidistant"} forces the distance between two adjacent thresholds to be the same, that is, $$\tau_k = \tau_1 + (k-1)\delta$$ for thresholds $\tau_k$ and distance $\delta$ (see also \citealp{andrich1978b}; \citealp{andrich1978a}; \citealp{andersen1977}). Third, the assumption that predictors have constant effects across categories may be relaxed for non-cumulative ordinal models \citep{vanderark2001, tutz2000} leading to category specific effects. For instance, variable \code{treat} may only have an impact on the decision between categories 3 and 4, but not on the lower categories. Without using category specific effects, such a pattern would remain invisible. To illustrate all three modeling options at once, we fit a (hardly theoretically justified) stopping ratio model with equidistant thresholds and category specific effects for variable \code{treat}, on which we apply an informative prior. \begin{Sinput} fit4 <- brm(formula = rating ~ period + carry + cs(treat) + (1|subject), data = inhaler, family = sratio, threshold = "equidistant", prior = set_prior("normal(-1,2)", coef = "treat")) \end{Sinput} Note that priors are defined on category specific effects in the same way as for other population-level effects.
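An overview of the parameters and parameter classes of such a model for which priors may be specified can again be obtained via \code{get_prior} (output omitted):
\begin{Sinput}
R> get_prior(rating ~ period + carry + cs(treat) + (1|subject),
             data = inhaler, family = sratio)
\end{Sinput}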
A model summary can be obtained in the same way as before: \begin{Sinput} R> summary(fit4, waic = TRUE) \end{Sinput} \begin{Soutput} Family: sratio (logit) Formula: rating ~ period + carry + cs(treat) + (1 | subject) Data: inhaler (Number of observations: 572) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: 911.9 Group-Level Effects: ~subject (Number of levels: 286) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 1.05 0.23 0.56 1.5 648 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept[1] 0.72 0.13 0.48 0.99 2048 1 Intercept[2] 2.67 0.35 2.00 3.39 969 1 Intercept[3] 4.62 0.66 3.36 5.95 1037 1 period 0.25 0.18 -0.09 0.61 4000 1 carry -0.26 0.22 -0.70 0.17 1874 1 treat[1] -0.96 0.30 -1.56 -0.40 1385 1 treat[2] -0.65 0.49 -1.60 0.27 4000 1 treat[3] -2.65 1.21 -5.00 -0.29 4000 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat delta 1.95 0.32 1.33 2.6 1181 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Soutput} Trace and density plots of the model parameters as produced by \code{plot(fit4)} can be found in Figure~\ref{inhaler_plot}. We see that three intercepts (thresholds) and three effects of \code{treat} have been estimated, because a four-point scale was used for the ratings. The treatment effect seems to be strongest between category 3 and 4. At the same time, however, the credible interval is also much larger. In fact, the intervals of all three effects of \code{treat} are highly overlapping, which indicates that there is not enough evidence in the data to support category specific effects. On the bottom of the output, parameter \code{delta} specifies the distance between two adjacent thresholds and indeed the intercepts differ from each other by the magnitude of \code{delta}. \begin{figure}[ht] \centering \includegraphics[width=0.95\textwidth]{inhaler_plot.pdf} \caption{Trace and Density plots of all relevant parameters of the inhaler model discussed in Section~\ref{software}.} \label{inhaler_plot} \end{figure} \section[Comparison]{Comparison between packages} Over the years, many \proglang{R} packages have been developed that implement MLMs, each being more or less general in their supported models. Comparing all of them to \pkg{brms} would be too extensive and barely helpful for the purpose of the present paper. Accordingly, we concentrate on a comparison with four packages. These are \pkg{lme4} \citep{bates2015} and \pkg{MCMCglmm} \citep{hadfield2010}, which are possibly the most general and widely applied \proglang{R} packages for MLMs, as well as \pkg{rstanarm} \citep{rstanarm2016} and \pkg{rethinking} \citep{mcelreath2016}, which are both based on \pkg{Stan}. As opposed to the other packages, \pkg{rethinking} was primarily written for teaching purposes and requires the user to specify the full model explicitly using its own simplified \pkg{BUGS}-like syntax thus helping users to better understand the models that are fitted to their data. Regarding model families, all five packages support the most common types such as linear and binomial models as well as Poisson models for count data. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. 
In addition, \pkg{brms} supports robust linear regression using Student's distribution, which is also implemented on a GitHub branch of \pkg{rstanarm}. \pkg{MCMCglmm} allows fitting multinomial models that are currently not available in the other packages. Generalizing classical MLMs, \pkg{brms} and \pkg{MCMCglmm} allow fiting zero-inflated and hurdle models dealing with excess zeros in the response. Furthermore, \pkg{brms} supports non-linear models similar to the \pkg{nlme} package \citep{nlme2016} providing great flexibility but also requiring more care to produce reasonable results. Another flexible model class are generalized additive mixed models \citep{hastie1990,wood2011,zuur2014}, which can be fitted with \pkg{brms} and \pkg{rstanarm}. In all five packages, there are quite a few additional modeling options. Variable link functions can be specified in all packages except for \pkg{MCMCglmm}, in which only one link is available per family. \pkg{MCMCglmm} generally supports multivariate responses using data in wide format, whereas \pkg{brms} currently only offers this option for families \code{gaussian} and \code{student}. It should be noted that it is always possible to transform data from wide to long format for compatibility with the other packages. Autocorrelation of the response can only be fitted in \pkg{brms}, which supports auto-regressive as well as moving-average effects. For ordinal models in \pkg{brms}, effects of predictors may vary across different levels of the response as explained in the inhaler example. A feature currently exclusive to \pkg{rethinking} is the possibility to impute missing values in the predictor variables. Information criteria are available in all three packages. The advantage of WAIC and LOO implemented in \pkg{brms}, \pkg{rstanarm}, and \pkg{rethinking} is that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. Comparing the prior options of the Bayesian packages, \pkg{brms} and \pkg{rethinking} offer a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, we believe that the way priors are specified in \pkg{brms} and \pkg{rethinking} is more intuitive as it is directly evident what prior is actually applied. A more detailed comparison of the packages can be found in Table~\ref{comparison1} and Table~\ref{comparison2}. To facilitate the understanding of the model formulation in \pkg{brms}, Table~\ref{syntax} shows \pkg{lme4} function calls to fit sample models along with the equivalent \pkg{brms} syntax. So far the focus was only on capabilities. Another important topic is speed, especially for more complex models. Of course, \pkg{lme4} is usually much faster than the other packages as it uses maximum likelihood methods instead of MCMC algorithms, which are slower by design. To compare the efficiency of the four Bayesian packages, we fitted multilevel models on real data sets using the minimum effective sample size divided by sampling time as a measure of sampling efficiency. One should always aim at running multiple chains as one cannot be sure that a single chain really explores the whole posterior distribution. However, as \pkg{MCMCglmm} does not come with a built-in option to run multiple chains, we used only a single chain to fit the models after making sure that it leads to the same results as multiple chains. 
The \proglang{R} code required to replicate the results is available as supplemental material. The first thing that becomes obvious when fitting the models is that \pkg{brms} and \pkg{rethinking} need to compile the \proglang{C++} model before actually fitting it, because the \pkg{Stan} code that is translated to \proglang{C++} is generated on the fly based on the user's input. Compilation takes about half a minute to one minute, depending on the model complexity and the computing power of the machine. This is not required by \pkg{rstanarm} and \pkg{MCMCglmm}, even though the former is also based on \pkg{Stan}, because its models are compiled only once at installation time. While pre-compiling models saves compilation time at run time, generating models on the fly is more flexible when it comes to model specification. For small and simple models, compilation time dominates the overall computation time, but for larger and more complex models, sampling will take several minutes or hours, so that one minute more or less does not really matter anymore. Accordingly, the following comparisons do not include the compilation time. In models containing only group-specific intercepts, \pkg{MCMCglmm} is usually more efficient than the \pkg{Stan} packages. However, when also estimating group-specific slopes, \pkg{MCMCglmm} falls behind the other packages and quite often refuses to sample at all unless one carefully specifies informative priors. Note that these results are obtained by running only a single chain. For all three \pkg{Stan} packages, sampling efficiency can easily be increased by running multiple chains in parallel. Comparing the \pkg{Stan} packages to each other, \pkg{brms} is usually most efficient for models with group-specific terms, whereas \pkg{rstanarm} tends to be roughly $50\%$ to $75\%$ as efficient, at least for the analyzed data sets. The efficiency of \pkg{rethinking} is more variable depending on the model formulation and data, sometimes being slightly ahead of the other two packages, but usually being considerably less efficient. Generally, \pkg{rethinking} loses efficiency for models with many population-level effects, presumably because one cannot use design matrices and vectorized prior specifications for population-level parameters. Note that it was not possible to specify the exact same priors across packages due to varying parameterizations. Of course, efficiency depends heavily on the model, chosen priors, and data at hand, so that the present results should not be over-interpreted.
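To illustrate how such an efficiency measure may be obtained for a single fitted model, the following sketch (not the exact code used for the comparisons) computes the minimal effective sample size per second of sampling time based on the \pkg{rstan} object stored in a \pkg{brms} model, here generically called \code{fit}:
\begin{Sinput}
R> neff <- summary(fit$fit)$summary[, "n_eff"]
R> time <- sum(rstan::get_elapsed_time(fit$fit)[, "sample"])
R> min(neff, na.rm = TRUE) / time
\end{Sinput}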
\begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{lme4}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & yes \\ Multinomial models & no & no & yes \\ Count data models & yes & yes & yes \\ Survival models & yes$^1$ & yes & yes \\ Ordinal models & various & no & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ Generalized additive models & yes & no & no \\ Non-linear models & yes & no & no \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Multivariate responses & limited & no & yes \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & yes \\ Censored data & yes & no & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & -- & no \\ population-level priors & flexible & --$^3$ & normal \\ group-level priors & normal & --$^3$ & normal \\ covariance priors & flexible & --$^3$ & restricted$^4$ \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & ML, REML & MH, Gibbs$^2$ \\ Information criterion & WAIC, LOO & AIC, BIC & DIC \\ \proglang{C++} compiler required & yes & no & no \\ Modularized & no & yes & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{lme4} and \pkg{MCMCglmm} package. Notes: (1) Weibull family only available in \pkg{brms}. (2) Estimator consists of a combination of both algorithms. (3) Priors may be imposed using the \pkg{blme} package \citep{chung2013}. 
(4) For details see \cite{hadfield2010}.} \label{comparison1} \end{table} \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{rethinking}} \\ \hline \\ [-1.5ex] \parbox{6cm}{Supported model types:} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & yes$^1$ & no \\ Binomial models & yes & yes & yes \\ Categorical models & yes & no & no \\ Multinomial models & no & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes$^2$ & yes & yes \\ Ordinal models & various & cumulative$^3$ & no \\ Zero-inflated and hurdle models & yes & no & no \\ Generalized additive models & yes & yes & no \\ Non-linear models & yes & no & limited$^4$ \\ \hline \\ [-1.5ex] \parbox{5cm}{Additional modeling options:} & & & \\ [1ex] Variable link functions & various & various & various \\ Weights & yes & yes & no \\ Offset & yes & yes & yes \\ Multivariate responses & limited & no & no \\ Autocorrelation effects & yes & no & no \\ Category specific effects & yes & no & no \\ Standard errors for meta-analysis & yes & no & no \\ Censored data & yes & no & no \\ Truncated data & yes & no & yes \\ Customized covariances & yes & no & no \\ Missing value imputation & no & no & yes \\ \hline \\ [-1.5ex] Bayesian specifics: & & & \\ [1ex] parallelization & yes & yes & yes \\ population-level priors & flexible & normal, Student-t & flexible \\ group-level priors & normal & normal & normal \\ covariance priors & flexible & restricted$^5$ & flexible \\ \hline \\ [-1.5ex] Other: & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & HMC, NUTS \\ Information criterion & WAIC, LOO & AIC, LOO & AIC, LOO \\ \proglang{C++} compiler required & yes & no & yes \\ Modularized & no & no & no \\ \hline \end{tabular} \caption{Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{rethinking} package. Notes: (1) Currently only implemented on a branch on GitHub. (2) Weibull family only available in \pkg{brms}. (3) No group-level terms allowed. (4) The parser is mainly written for linear models but also accepts some non-linear model specifications. 
(5) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}.} \label{comparison2} \end{table} \begin{table}[hbtp] \centering %\renewcommand{\arraystretch}{2} \begin{tabular}{ll} Dataset & \parbox{10cm}{Function call} \\ \hline \\ [-1.5ex] \parbox{2cm}{cake} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{5ex} data = cake)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(angle $\sim$ recipe * temperature + (1|recipe:replicate), \\ \hspace*{4ex} data = cake)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{sleepstudy} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{lmer(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [1.5ex] \pkg{brms} & \parbox{13cm}{\code{brm(Reaction $\sim$ Days + (Days|Subject), data = sleepstudy)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{cbpp$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(cbind(incidence, size - incidence) $\sim$ period + (1 | herd), \\ \hspace*{6ex} family = binomial("logit"), data = cbpp)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(incidence | trials(size) $\sim$ period + (1 | herd), \\ \hspace*{4ex} family = binomial("logit"), data = cbpp)}} \\ [2ex] \hline \\ [-1.5ex] \parbox{2cm}{grouseticks$^1$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{6ex} family = poisson("log"), data = grouseticks)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(TICKS $\sim$ YEAR + HEIGHT + (1|BROOD) + (1|LOCATION), \\ \hspace*{4ex} family = poisson("log"), data = grouseticks)}} \\ [2ex] \hline \\ [-1ex] \parbox{2cm}{VerbAgg$^2$} & \\ [1ex] \pkg{lme4} & \parbox{13cm}{\code{glmer(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{6ex} + (1|item), family = binomial, data = VerbAgg)}} \\ [3ex] \pkg{brms} & \parbox{13cm}{\code{brm(r2 $\sim$ (Anger + Gender + btype + situ)\^{}2 + (1|id) \\ \hspace*{4ex} + (1|item), family = bernoulli, data = VerbAgg)}} \\ [2ex] \hline \\ [-1.5ex] \end{tabular} \caption{Comparison of the model syntax of \pkg{lme4} and \pkg{brms} using data sets included in \pkg{lme4}. Notes: (1) Default links are used to that the link argument may be omitted. (2) Fitting this model takes some time. A proper prior on the population-level effects (e.g., \code{prior = set\_prior("normal(0,5)")}) may help in increasing sampling speed.} \label{syntax} \end{table} \section{Conclusion} The present paper is meant to provide a general overview on the \proglang{R} package \pkg{brms} implementing MLMs using the probabilistic programming language \pkg{Stan} for full Bayesian inference. Although only a small selection of the modeling options available in \pkg{brms} are discussed in detail, I hope that this article can serve as a good starting point to further explore the capabilities of the package. For the future, I have several plans on how to improve the functionality of \pkg{brms}. I want to include multivariate models that can handle multiple response variables coming from different distributions as well as new correlation structures for instance for spatial data. Similarily, distributional regression models as well as mixture response distributions appear to be valuable extensions of the package. I am always grateful for any suggestions and ideas regarding new features. 
\section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language \pkg{Stan}, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Two anonymous reviewers provided very detailed and thoughtful suggestions to substantially improve both the package and the paper. Furthermore, Prof. Philipp Doebler and Prof. Heinz Holling have given valuable feedback on earlier versions of the paper. Lastly, I want to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_overview} \end{document} brms/inst/doc/brms_threading.R0000644000175000017500000003743714146747003016230 0ustar nileshnileshparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ## ---- fake-data-sim, include=FALSE, eval=TRUE------------------------------------------- set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, N),] # drop not needed row names rownames(fake) <- NULL ## ---- model-poisson, include=FALSE------------------------------------------------------ model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4) ) ## ---- benchmark, include=FALSE---------------------------------------------------------- # Benchmarks given model with cross-product of tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and inits is set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. 
benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, inits = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, inits=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, inits = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) lapply(sims, abind::asub, idx=draw, dim=1, drop=FALSE) ## ---- eval=FALSE------------------------------------------------------------------------ # fit_serial <- brm( # count ~ zAge + zBase * Trt + (1|patient), # data = epilepsy, family = poisson(), # chains = 4, cores = 4, backend = "cmdstanr" # ) ## ---- eval=FALSE------------------------------------------------------------------------ # fit_parallel <- update( # fit_serial, chains = 2, cores = 2, # backend = "cmdstanr", threads = threading(2) # ) ## --------------------------------------------------------------------------------------- kable(head(fake, 10), digits = 3) ## ---- eval=FALSE------------------------------------------------------------------------ # model_poisson <- brm( # y ~ 1 + x1 + x2 + (1 | g), # data = fake, # family = poisson(), # iter = 500, # short sampling to speedup example # chains = 2, # prior = prior(normal(0,1), class = b) + # prior(constant(1), class = sd, group = g), # backend = "cmdstanr", # threads = threading(4) # ) ## ---- chunking-scale, message=FALSE, warning=FALSE, results='hide'---------------------- chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, 
cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ## ---- munge-chunking-scaling, include=FALSE--------------------------------------------- scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ## --------------------------------------------------------------------------------------- ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ## ---- speedup-scale, message=FALSE, warning=FALSE, results='hide'----------------------- num_cpu <- parallel::detectCores(logical = FALSE) num_cpu_logical <- parallel::detectCores(logical = TRUE) grainsize_default <- ceiling(N / (2 * num_cpu)) cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical) cores <- sort(unique(cores)) grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4) grainsize <- round(grainsize) iter_scaling <- 20 scaling_cores <- benchmark_threading( model_poisson, cores = cores, grainsize = grainsize, iter = iter_scaling, static = FALSE ) single_core <- transform( subset(scaling_cores, cores == 1), runtime_single = runtime, num_leapfrog=NULL, runtime=NULL, cores = NULL ) scaling_cores <- transform( merge(scaling_cores, single_core), speedup = runtime_single/runtime, grainsize = factor(grainsize) ) ## --------------------------------------------------------------------------------------- ggplot(scaling_cores) + aes(cores, runtime, shape = grainsize, color = grainsize) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_cores$cores) + scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) + theme(legend.position = c(0.85, 0.8)) + geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) + ggtitle("Runtime with varying number of cores", "Dashed line is reference model without reduce_sum") ggplot(scaling_cores) + aes(cores, speedup, shape = grainsize, color = grainsize) + geom_abline(slope = 1, intercept = 0, linetype = 2) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks=scaling_cores$cores) + scale_y_log10(breaks=scaling_cores$cores) + theme(aspect.ratio = 1) + coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) + ggtitle("Relative speedup vs 1 core") 
## --------------------------------------------------------------------------------------- kable(scaling_cores, digits = 2) ## ---- eval=FALSE------------------------------------------------------------------------ # set.seed(54647) # # number of observations # N <- 1E4 # # number of group levels # G <- round(N / 10) # # number of predictors # P <- 3 # # regression coefficients # beta <- rnorm(P) # # # sampled covariates, group means and fake data # fake <- matrix(rnorm(N * P), ncol = P) # dimnames(fake) <- list(NULL, paste0("x", 1:P)) # # # fixed effect part and sampled group membership # fake <- transform( # as.data.frame(fake), # theta = fake %*% beta, # g = sample.int(G, N, replace=TRUE) # ) # # # add random intercept by group # fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # # # linear predictor # fake <- transform(fake, mu = theta + eta) # # # sample Poisson data # fake <- transform(fake, y = rpois(N, exp(mu))) # # # shuffle order of data rows to ensure even distribution of computational effort # fake <- fake[sample.int(N, N),] # # # drop not needed row names # rownames(fake) <- NULL ## ---- eval=FALSE------------------------------------------------------------------------ # model_poisson <- brm( # y ~ 1 + x1 + x2 + (1 | g), # data = fake, # family = poisson(), # iter = 500, # short sampling to speedup example # chains = 2, # prior = prior(normal(0,1), class = b) + # prior(constant(1), class = sd, group = g), # backend = "cmdstanr", # threads = threading(4) # ) ## ---- eval=FALSE------------------------------------------------------------------------ # # Benchmarks given model with cross-product of tuning parameters CPU # # cores, grainsize and iterations. Models are run with either static # # or non-static scheduler and inits is set by default to 0 on the # # unconstrained scale. Function returns a data-frame with the # # cross-product of the tuning parameters and as result column the # # respective runtime. 
# benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, # static = FALSE) { # # winfo <- extract_warmup_info(model) # sims <- rstan::extract(model$fit) # init <- list(extract_draw(sims, 1)) # # scaling_model <- update( # model, refresh = 0, # threads = threading(1, grainsize = grainsize[1], static = static), # chains = 1, iter = 2, backend = "cmdstanr" # ) # # run_benchmark <- function(cores, size, iter) { # bench_fit <- update( # scaling_model, warmup=0, iter = iter, # chains = 1, seed = 1234, inits = init, refresh = 0, save_warmup=TRUE, # threads = threading(cores, grainsize = size, static = static), # inv_metric=winfo$inv_metric[[1]], # step_size=winfo$step_size[[1]], # adapt_engaged=FALSE # ) # lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) # elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) # # c(num_leapfrog=lf, runtime=elapsed) # } # # cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) # res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) # cbind(cases, as.data.frame(t(res))) # } # # benchmark_reference <- function(model, iter=100, inits=0) { # winfo <- extract_warmup_info(model) # sims <- rstan::extract(model$fit) # init <- list(extract_draw(sims, 1)) # # ref_model <- update( # model, refresh = 0, # threads = NULL, # chains = 1, iter = 2, backend = "cmdstanr" # ) # # run_benchmark_ref <- function(iter_bench) { # bench_fit <- update( # ref_model, warmup=0, iter = iter_bench, # chains = 1, seed = 1234, inits = init, refresh = 0, # inv_metric=winfo$inv_metric[[1]], # step_size=winfo$step_size[[1]], # adapt_engaged=FALSE # ) # # lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) # elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) # # c(num_leapfrog=lf, runtime=elapsed) # } # # ref <- sapply(iter, run_benchmark_ref) # ref <- cbind(as.data.frame(t(ref)), iter=iter) # ref # } # # extract_warmup_info <- function(bfit) { # adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") # step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) # inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) # list(step_size=step_size, inv_metric=inv_metric) # } # # extract_draw <- function(sims, draw) lapply(sims, abind::asub, idx=draw, dim=1, drop=FALSE) # ## ---- eval=FALSE------------------------------------------------------------------------ # scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") # # single_chunk <- transform( # subset(scaling_chunking, chunks == 1), # num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, # runtime_single = runtime, runtime = NULL, # grainsize = NULL, chunks=NULL # ) # # scaling_chunking <- transform( # merge(scaling_chunking, single_chunk), # slowdown = runtime/runtime_single, # iter = factor(iter), # runtime_single = NULL # ) # # ref <- transform(ref, iter=factor(iter)) brms/inst/doc/brms_multilevel.ltx0000644000175000017500000016721713701270367017053 0ustar nileshnilesh\documentclass[article, nojss]{jss} %\VignetteIndexEntry{Multilevel Models with brms} %\VignetteEngine{R.rsp::tex} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% declarations for jss.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% almost as usual \author{Paul-Christian B\"urkner} \title{Advanced Bayesian Multilevel Modeling with the \proglang{R} Package \pkg{brms}} %% for pretty printing 
and a nice hypersummary also set: \Plainauthor{Paul-Christian B\"urkner} %% comma-separated \Plaintitle{Advanced Bayesian Multilevel Modeling with the R Package brms} %% without formatting \Shorttitle{Advanced Bayesian Multilevel Modeling with \pkg{brms}} %% a short title (if necessary) %% an abstract and keywords \Abstract{ The \pkg{brms} package allows R users to easily specify a wide range of Bayesian single-level and multilevel models, which are fitted with the probabilistic programming language \proglang{Stan} behind the scenes. Several response distributions are supported, of which all parameters (e.g., location, scale, and shape) can be predicted at the same time thus allowing for distributional regression. Non-linear relationships may be specified using non-linear predictor terms or semi-parametric approaches such as splines or Gaussian processes. Multivariate models, in which each response variable can be predicted using the above mentioned options, can be fitted as well. To make all of these modeling options possible in a multilevel framework, \pkg{brms} provides an intuitive and powerful formula syntax, which extends the well known formula syntax of \pkg{lme4}. The purpose of the present paper is to introduce this syntax in detail and to demonstrate its usefulness with four examples, each showing other relevant aspects of the syntax. If you use \pkg{brms}, please cite this article as published in the R Journal \citep{brms2}. } \Keywords{Bayesian inference, multilevel models, distributional regression, MCMC, \proglang{Stan}, \proglang{R}} \Plainkeywords{Bayesian inference, multilevel models, distributional regression, MCMC, Stan, R} %% without formatting %% at least one keyword must be supplied %% publication information %% NOTE: Typically, this can be left commented and will be filled out by the technical editor %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{2012-06-04} %% \Acceptdate{2012-06-04} %% The address of (at least) one author should be given %% in the following format: \Address{ Paul-Christian B\"urkner\\ E-mail: \email{paul.buerkner@gmail.com}\\ URL: \url{https://paul-buerkner.github.io} } %% It is also possible to add a telephone and fax number %% before the e-mail in the following format: %% Telephone: +43/512/507-7103 %% Fax: +43/512/507-2851 %% for those who use Sweave please include the following line (with % symbols): %% need no \usepackage{Sweave.sty} %% end of declarations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %% include your article here, just as usual %% Note that you should use the \pkg{}, \proglang{} and \code{} commands. \section{Introduction} Multilevel models (MLMs) offer great flexibility for researchers across sciences \citep{brown2015, demidenko2013, gelmanMLM2006, pinheiro2006}. They allow modeling of data measured on different levels at the same time -- for instance data of students nested within classes and schools -- thus taking complex dependency structures into account. It is not surprising that many packages for R have been developed to fit MLMs. Usually, however, the functionality of these implementations is limited insofar as it is only possible to predict the mean of the response distribution. Other parameters of the response distribution, such as the residual standard deviation in linear models, are assumed constant across observations, which may be violated in many applications. Accordingly, it is desirable to allow for prediction of \emph{all} response parameters at the same time. 
Models doing exactly that are often referred to as \emph{distributional models} or more verbosely \emph{models for location, scale and shape} \citep{rigby2005}. Another limitation of basic MLMs is that they only allow for linear predictor terms. While linear predictor terms already offer good flexibility, they are of limited use when relationships are inherently non-linear. Such non-linearity can be handled in at least two ways: (1) by fully specifying a non-linear predictor term with corresponding parameters each of which can be predicted using MLMs \citep{lindstrom1990}, or (2) estimating the form of the non-linear relationship on the fly using splines \citep{wood2004} or Gaussian processes \citep{rasmussen2006}. The former are often simply called \emph{non-linear models}, while models applying splines are referred to as \emph{generalized additive models} (GAMs; \citeauthor{hastie1990}, \citeyear{hastie1990}). Combining all of these modeling options into one framework is a complex task, both conceptually and with regard to model fitting. Maximum likelihood methods, which are typically applied in classical 'frequentist' statistics, can reach their limits at some point and fully Bayesian methods become the go-to solutions to fit such complex models \citep{gelman2014}. In addition to being more flexible, the Bayesian framework comes with other advantages, for instance, the ability to derive probability statements for every quantity of interest or explicitly incorporating prior knowledge about parameters into the model. The former is particularly relevant in non-linear models, for which classical approaches struggle more often than not in propagating all the uncertainty in the parameter estimates to non-linear functions such as out-of-sample predictions. Possibly the most powerful program for performing full Bayesian inference available to date is Stan \citep{stanM2017, carpenter2017}. It implements Hamiltonian Monte Carlo \citep{duane1987, neal2011, betancourt2014} and its extension, the No-U-Turn (NUTS) Sampler \citep{hoffman2014}. These algorithms converge much more quickly than other Markov-Chain Monte-Carlo (MCMC) algorithms especially for high-dimensional models \citep{hoffman2014, betancourt2014, betancourt2017}. An excellent non-mathematical introduction to Hamiltonian Monte Carlo can be found in \citet{betancourt2017}. Stan comes with its own programming language, allowing for great modeling flexibility \cite{stanM2017, carpenter2017}). Many researchers may still be hesitent to use Stan directly, as every model has to be written, debugged and possibly also optimized. This may be a time-consuming and error-prone process even for researchers familiar with Bayesian inference. The \pkg{brms} package \citep{brms1}, presented in this paper, aims to remove these hurdles for a wide range of regression models by allowing the user to benefit from the merits of Stan by using extended \pkg{lme4}-like \citep{bates2015} formula syntax, with which many R users are familiar with. It offers much more than writing efficient and human-readable Stan code: \pkg{brms} comes with many post-processing and visualization functions, for instance to perform posterior predictive checks, leave-one-out cross-validation, visualization of estimated effects, and prediction of new data. The overarching aim is to have one general framework for regression modeling, which offers everything required to successfully apply regression models to complex data. 
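For illustration only (this example is not discussed further in the paper and the chosen predictors merely serve demonstration purposes), fitting a simple multilevel Poisson model to the \code{epilepsy} data set shipped with \pkg{brms} and applying some of the post-processing methods just mentioned could look as follows.
\begin{Sinput}
library(brms)
fit <- brm(count ~ zAge + zBase * Trt + (1 | patient),
           data = epilepsy, family = poisson())
pp_check(fit)                  # posterior predictive checks
loo(fit)                       # approximate leave-one-out cross-validation
conditional_effects(fit)       # visualization of estimated effects
predict(fit, newdata = epilepsy[1:5, ])  # predictions for (new) data
\end{Sinput}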
To date, it already replaces and extends the functionality of dozens of other R packages, each of which is restricted to specific regression models\footnote{Unfortunately, due to the implementation via Stan, it is not easily possible for users to define their own response distributions and run them via \pkg{brms}. If you feel that a response distribution is missing in \pkg{brms}, please open an issue on GitHub (\url{https://github.com/paul-buerkner/brms}).}. The purpose of the present article is to provide an introduction of the advanced multilevel formula syntax implemented in \pkg{brms}, which allows to fit a wide and growing range of non-linear distributional multilevel models. A general overview of the package is already given in \citet{brms1}. Accordingly, the present article focuses on more recent developments. We begin by explaining the underlying structure of distributional models. Next, the formula syntax of \pkg{lme4} and its extensions implemented in \pkg{brms} are explained. Four examples that demonstrate the use of the new syntax are discussed in detail. Afterwards, the functionality of \pkg{brms} is compared with that of \pkg{rstanarm} \citep{rstanarm2017} and \pkg{MCMCglmm} \citep{hadfield2010}. We end by describing future plans for extending the package. \section{Model description} \label{model} The core of models implemented in \pkg{brms} is the prediction of the response $y$ through predicting all parameters $\theta_p$ of the response distribution $D$, which is also called the model \code{family} in many R packages. We write $$y_i \sim D(\theta_{1i}, \theta_{2i}, ...)$$ to stress the dependency on the $i\textsuperscript{th}$ observation. Every parameter $\theta_p$ may be regressed on its own predictor term $\eta_p$ transformed by the inverse link function $f_p$ that is $\theta_{pi} = f_p(\eta_{pi})$\footnote{A parameter can also be assumed constant across observations so that a linear predictor is not required.}. Such models are typically refered to as \emph{distributional models}\footnote{The models described in \citet{brms1} are a sub-class of the here described models.}. Details about the parameterization of each \code{family} are given in \code{vignette("brms\_families")}. Suppressing the index $p$ for simplicity, a predictor term $\eta$ can generally be written as $$ \eta = \mathbf{X} \beta + \mathbf{Z} u + \sum_{k = 1}^K s_k(x_k) $$ In this equation, $\beta$ and $u$ are the coefficients at population-level and group-level respectively and $\mathbf{X}, \mathbf{Z}$ are the corresponding design matrices. The terms $s_k(x_k)$ symbolize optional smooth functions of unspecified form based on covariates $x_k$ fitted via splines (see \citet{wood2011} for the underlying implementation in the \pkg{mgcv} package) or Gaussian processes \citep{williams1996}. The response $y$ as well as $\mathbf{X}$, $\mathbf{Z}$, and $x_k$ make up the data, whereas $\beta$, $u$, and the smooth functions $s_k$ are the model parameters being estimated. The coefficients $\beta$ and $u$ may be more commonly known as fixed and random effects, but I avoid theses terms following the recommendations of \citet{gelmanMLM2006}. Details about prior distributions of $\beta$ and $u$ can be found in \citet{brms1} and under \code{help("set\_prior")}. As an alternative to the strictly additive formulation described above, predictor terms may also have any form specifiable in Stan. 
We call it a \emph{non-linear} predictor and write $$\eta = f(c_1, c_2, ..., \phi_1, \phi_2, ...)$$ The structure of the function $f$ is given by the user, $c_r$ are known or observed covariates, and $\phi_s$ are non-linear parameters each having its own linear predictor term $\eta_{\phi_s}$ of the form specified above. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves. A frequentist implementation of such models, which inspired the non-linear syntax in \pkg{brms}, can be found in the \pkg{nlme} package \citep{nlme2016}. \section{Extended multilevel formula syntax} \label{formula_syntax} The formula syntax applied in \pkg{brms} builds upon the syntax of the R package \pkg{lme4} \citep{bates2015}. First, we will briefly explain the \pkg{lme4} syntax used to specify multilevel models and then introduce certain extensions that allow to specify much more complicated models in \pkg{brms}. An \pkg{lme4} formula has the general form \begin{Sinput} response ~ pterms + (gterms | group) \end{Sinput} The \code{pterms} part contains the population-level effects that are assumed to be the same across obervations. The \code{gterms} part contains so called group-level effects that are assumed to vary accross grouping variables specified in \code{group}. Multiple grouping factors each with multiple group-level effects are possible. Usually, \code{group} contains only a single variable name pointing to a factor, but you may also use \code{g1:g2} or \code{g1/g2}, if both \code{g1} and \code{g2} are suitable grouping factors. The \code{:} operator creates a new grouping factor that consists of the combined levels of \code{g1} and \code{g2} (you could think of this as pasting the levels of both factors together). The \code{/} operator indicates nested grouping structures and expands one grouping factor into two or more when using multiple \code{/} within one term. If, for instance, you write \code{(1 | g1/g2)}, it will be expanded to \code{(1 | g1) + (1 | g1:g2)}. Instead of \code{|} you may use \code{||} in grouping terms to prevent group-level correlations from being modeled. This may be useful in particular when modeling so many group-level effects that convergence of the fitting algorithms becomes an issue due to model complexity. One limitation of the \code{||} operator in \pkg{lme4} is that it only splits up terms so that columns of the design matrix originating from the same term are still modeled as correlated (e.g., when coding a categorical predictor; see the \code{mixed} function of the \pkg{afex} package by \citet{afex2015} for a way to avoid this behavior). While intuitive and visually appealing, the classic \pkg{lme4} syntax is not flexible enough to allow for specifying the more complex models supported by \pkg{brms}. In non-linear or distributional models, for instance, multiple parameters are predicted, each having their own population and group-level effects. Hence, multiple formulas are necessary to specify such models\footnote{Actually, it is possible to specify multiple model parts within one formula using interactions terms for instance as implemented in \pkg{MCMCglmm} \citep{hadfield2010}. However, this syntax is limited in flexibility and requires a rather deep understanding of the way R parses formulas, thus often being confusing to users.}. Then, however, specifying group-level effects of the same grouping factor to be correlated \emph{across} formulas becomes complicated. 
The solution implemented in \pkg{brms} (and currently unique to it) is to expand the \code{|} operator into \code{||}, where \code{} can be any value. Group-level terms with the same \code{ID} will then be modeled as correlated if they share same grouping factor(s)\footnote{It might even be further extended to \code{|fun()|}, where \code{fun} defines the type of correlation structure, defaulting to unstructured that is estimating the full correlation matrix. The \code{fun} argument is not yet supported by \pkg{brms} but could be supported in the future if other correlation structures, such as compound symmetry or Toeplitz, turn out to have reasonable practical applications and effective implementations in Stan.}. For instance, if the terms \code{(x1|ID|g1)} and \code{(x2|ID|g1)} appear somewhere in the same or different formulas passed to \pkg{brms}, they will be modeled as correlated. Further extensions of the classical \pkg{lme4} syntax refer to the \code{group} part. It is rather limited in its flexibility since only variable names combined by \code{:} or \code{/} are supported. We propose two extensions of this syntax: Firstly, \code{group} can generally be split up in its terms so that, say, \code{(1 | g1 + g2)} is expanded to \code{(1 | g1) + (1 | g2)}. This is fully consistent with the way \code{/} is handled so it provides a natural generalization to the existing syntax. Secondly, there are some special grouping structures that cannot be expressed by simply combining grouping variables. For instance, multi-membership models cannot be expressed this way. To overcome this limitation, we propose wrapping terms in \code{group} within special functions that allow specifying alternative grouping structures: \code{(gterms | fun(group))}. In \pkg{brms}, there are currently two such functions implemented, namely \code{gr} for the default behavior and \code{mm} for multi-membership terms. To be compatible with the original syntax and to keep formulas short, \code{gr} is automatically added internally if none of these functions is specified. While some non-linear relationships, such as quadratic relationships, can be expressed within the basic R formula syntax, other more complicated ones cannot. For this reason, it is possible in \pkg{brms} to fully specify non-linear predictor terms similar to how it is done in \pkg{nlme}, but fully compatible with the extended multilevel syntax described above. Suppose, for instance, we want to model the non-linear growth curve $$ y = b_1 (1 - \exp(-(x / b_2)^{b_3}) $$ between $y$ and $x$ with parameters $b_1$, $b_2$, and $b_3$ (see Example 3 in this paper for an implementation of this model with real data). Furthermore, we want all three parameters to vary by a grouping variable $g$ and model those group-level effects as correlated. Additionally $b_1$ should be predicted by a covariate $z$. We can express this in \pkg{brms} using multiple formulas, one for the non-linear model itself and one per non-linear parameter: \begin{Sinput} y ~ b1 * (1 - exp(-(x / b2) ^ b3) b1 ~ z + (1|ID|g) b2 ~ (1|ID|g) b3 ~ (1|ID|g) \end{Sinput} The first formula will not be evaluated using standard R formula parsing, but instead taken literally. In contrast, the formulas for the non-linear parameters will be evaluated in the usual way and are compatible with all terms supported by \pkg{brms}. Note that we have used the above described ID-syntax to model group-level effects as correlated across formulas. 
There are other syntax extensions implemented in \pkg{brms} that do not directly target grouping terms. Firstly, there are terms formally included in the \code{pterms} part that are handled separately. The most prominent examples are smooth terms specified through the \code{s} and \code{t2} functions of the \pkg{mgcv} package \citep{wood2011}. Other examples are category specific effects \code{cs}, monotonic effects \code{mo}, noise-free effects \code{me}, or Gaussian process terms \code{gp}. The former is explained in \citet{brms1}, while the latter three are documented in \code{help(brmsformula)}. Internally, these terms are extracted from \code{pterms} and not included in the construction of the population-level design matrix. Secondly, making use of the fact that \code{|} is unused on the left-hand side of $\sim$ in formula, additional information on the response variable may be specified via \begin{Sinput} response | aterms ~ \end{Sinput} The \code{aterms} part may contain multiple terms of the form \code{fun()} separated by \code{+} each providing special information on the response variable. This allows among others to weight observations, provide known standard errors for meta-analysis, or model censored or truncated data. As it is not the main topic of the present paper, we refer to \code{help("brmsformula")} and \code{help("addition-terms")} for more details. To set up the model formulas and combine them into one object, \pkg{brms} defines the \code{brmsformula} (or short \code{bf}) function. Its output can then be passed to the \code{parse\_bf} function, which splits up the formulas in separate parts and prepares them for the generation of design matrices and related data. Other packages may re-use these functions in their own routines making it easier to offer support for the above described multilevel syntax. \section{Examples} The idea of \pkg{brms} is to provide one unified framework for multilevel regression models in R. As such, the above described formula syntax in all of its variations can be applied in combination with all response distributions supported by \pkg{brms} (currently about 35 response distributions are supported; see \code{help("brmsfamily")} and \code{vignette("brms\_families")} for an overview). In this section, we will discuss four examples in detail, each focusing on certain aspects of the syntax. They are chosen to provide a broad overview of the modeling options. The first is about the number of fish caught be different groups of people. It does not actually contain any multilevel structure, but helps in understanding how to set up formulas for different model parts. The second example is about housing rents in Munich. We model the data using splines and a distributional regression approach. The third example is about cumulative insurance loss payments across several years, which is fitted using a rather complex non-linear multilevel model. Finally, the fourth example is about the performance of school children, who change school during the year, thus requiring a multi-membership model. Despite not being covered in the four examples, there are a few more modeling options that we want to briefly describe. First, \pkg{brms} allows fitting so called phylogenetic models. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. Species are not independent as they come from the same phylogenetic tree, implying that different levels of the same grouping-factor (i.e., species) are likely correlated. 
There is a whole vignette dedicated to this topic, which can be found via \code{vignette("brms\_phylogenetics")}. Second, there is a canonical way to handle ordinal predictors, without falsely assuming they are either categorical or continuous. We call them monotonic effects and discuss them in \code{vignette("brms\_monotonic")}. Last but not least, it is possible to account for measurement error in both response and predictor variables. This is often ignored in applied regression modeling \citep{westfall2016}, although measurement error is very common in all scientific fields making use of observational data. There is no vignette yet covering this topic, but one will be added in the future. In the meantime, \code{help("brmsformula")} is the best place to start learning about such models as well as about other details of the \pkg{brms} formula syntax. \subsection{Example 1: Catching fish} An important application of the distributional regression framework of \pkg{brms} are so called zero-inflated and hurdle models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: ``The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.'' \begin{Sinput} zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") zinb$camper <- factor(zinb$camper, labels = c("no", "yes")) head(zinb) \end{Sinput} \begin{Sinput} nofish livebait camper persons child xb zg count 1 1 0 no 1 0 -0.8963146 3.0504048 0 2 0 1 yes 1 0 -0.5583450 1.7461489 0 3 0 1 no 1 0 -0.4017310 0.2799389 0 4 0 1 yes 2 1 -0.9562981 -0.6015257 0 5 0 1 no 1 0 0.4368910 0.5277091 1 6 0 1 yes 4 2 1.3944855 -0.7075348 0 \end{Sinput} As predictors we choose the number of people per group, the number of children, as well as whether or not the group consists of campers. Many groups may not catch any fish just because they do not try and so we fit a zero-inflated Poisson model. For now, we assume a constant zero-inflation probability across observations. \begin{Sinput} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson("log")) \end{Sinput} The model is readily summarized via \begin{Sinput} summary(fit_zinb1) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.01 0.17 -1.34 -0.67 2171 1 persons 0.87 0.04 0.79 0.96 2188 1 child -1.36 0.09 -1.55 -1.18 1790 1 camper 0.80 0.09 0.62 0.98 2950 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat zi 0.41 0.04 0.32 0.49 2409 1 Samples were drawn using sampling(NUTS). 
For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} A graphical summary is available through \begin{Sinput} conditional_effects(fit_zinb1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_zinb1.pdf} \caption{Conditional effects plots of the \code{fit\_zinb1} model.} \label{me_zinb1} \end{figure} (see Figure \ref{me_zinb1}). In fact, the \code{conditional\_effects} method turned out to be so powerful in visualizing effects of predictors that I am using it almost as frequently as \code{summary}. According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability \code{zi} is pretty large with a mean of 41\%. Please note that the probability of catching no fish is actually higher than 41\%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-\emph{inflation}). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish, since children often lack the patience required for fishing. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. \begin{Sinput} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) \end{Sinput} To transform the linear predictor of \code{zi} into a probability, \pkg{brms} applies the logit-link, which takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. \begin{Sinput} summary(fit_zinb2) \end{Sinput} \begin{Sinput} Family: zero_inflated_poisson (log) Formula: count ~ persons + child + camper zi ~ child Data: zinb (Number of observations: 250) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept -1.07 0.18 -1.43 -0.73 2322 1 persons 0.89 0.05 0.80 0.98 2481 1 child -1.17 0.10 -1.37 -1.00 2615 1 camper 0.78 0.10 0.60 0.96 3270 1 zi_Intercept -0.95 0.27 -1.52 -0.48 2341 1 zi_child 1.21 0.28 0.69 1.79 2492 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} According to the model, trying to fish with children not only decreases the overall number fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), possibly because groups with more children spend less time or no time at all fishing. Comparing model fit via leave-one-out cross validation as implemented in the \pkg{loo} package \citep{loo2016, vehtari2016}. 
\begin{Sinput} LOO(fit_zinb1, fit_zinb2) \end{Sinput} \begin{Sinput} LOOIC SE fit_zinb1 1639.52 363.30 fit_zinb2 1621.35 362.39 fit_zinb1 - fit_zinb2 18.16 15.71 \end{Sinput} reveals that the second model using the number of children to predict both model parts has better fit. However, when considering the standard error of the \code{LOOIC} difference, improvement in model fit is apparently modest and not substantial. More examples of distributional model can be found in \code{vignette("brms\_distreg")}. \subsection{Example 2: Housing rents} In their book about regression modeling, \citet{fahrmeir2013} use an example about the housing rents in Munich from 1999. The data contains information about roughly 3000 apartments including among others the absolute rent (\code{rent}), rent per square meter (\code{rentsqm}), size of the apartment (\code{area}), construction year (\code{yearc}), and the district in Munich (\code{district}), where the apartment is located. The data can be found in the \pkg{gamlss.data} package \citep{gamlss.data}: \begin{Sinput} data("rent99", package = "gamlss.data") head(rent99) \end{Sinput} \begin{Sinput} rent rentsqm area yearc location bath kitchen cheating district 1 109.9487 4.228797 26 1918 2 0 0 0 916 2 243.2820 8.688646 28 1918 2 0 0 1 813 3 261.6410 8.721369 30 1918 1 0 0 1 611 4 106.4103 3.547009 30 1918 2 0 0 0 2025 5 133.3846 4.446154 30 1918 2 0 0 1 561 6 339.0256 11.300851 30 1918 2 0 0 1 541 \end{Sinput} Here, we aim at predicting the rent per square meter with the size of the apartment as well as the construction year, while taking the district of Munich into account. As the effect of both predictors on the rent is of unknown non-linear form, we model these variables using a bivariate tensor spline \citep{wood2013}. The district is accounted for via a varying intercept. \begin{Sinput} fit_rent1 <- brm(rentsqm ~ t2(area, yearc) + (1|district), data = rent99, chains = 2, cores = 2) \end{Sinput} We fit the model using just two chains (instead of the default of four chains) on two processor cores to reduce the model fitting time for the purpose of the present paper. In general, using the default option of four chains (or more) is recommended. \begin{Sinput} summary(fit_rent1) \end{Sinput} \begin{Sinput} Family: gaussian(identity) Formula: rentsqm ~ t2(area, yearc) + (1 | district) Data: rent99 (Number of observations: 3082) Samples: 2 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 2000 ICs: LOO = NA; WAIC = NA; R2 = NA Smooth Terms: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sds(t2areayearc_1) 4.93 2.32 1.61 10.77 1546 1.00 sds(t2areayearc_2) 5.78 2.87 1.58 13.15 1175 1.00 sds(t2areayearc_3) 8.09 3.19 3.66 16.22 1418 1.00 Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.48 0.73 494 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 7.80 0.11 7.59 8.02 2000 1.00 t2areayearc_1 -1.00 0.09 -1.15 -0.83 2000 1.00 t2areayearc_2 0.75 0.17 0.43 1.09 2000 1.00 t2areayearc_3 -0.07 0.16 -0.40 0.24 1579 1.00 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 1.95 0.03 1.90 2.01 2000 1.00 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). 
\end{Sinput} For models including splines, the output of \code{summary} is not tremendously helpful, but we get at least some information. Firstly, the credible intervals of the standard deviations of the coefficients forming the splines (under \code{'Smooth Terms'}) are sufficiently far away from zero to indicate non-linearity in the (combined) effect of \code{area} and \code{yearc}. Secondly, even after controlling for these predictors, districts still vary with respect to rent per square meter by a sizable amount as visible under \code{'Group-Level Effects'} in the output. To further understand the effect of the predictor, we apply graphical methods: \begin{Sinput} conditional_effects(fit_rent1, surface = TRUE) \end{Sinput} In Figure \ref{me_rent1}, the conditional effects of both predictors are displayed, while the respective other predictor is fixed at its mean. In Figure \ref{me_rent2}, the combined effect is shown, clearly demonstrating an interaction between the variables. In particular, housing rents appear to be highest for small and relatively new apartments. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent1.pdf} \caption{Conditional effects plots of the \code{fit\_rent1} model for single predictors.} \label{me_rent1} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_rent2.pdf} \caption{Surface plot of the \code{fit\_rent1} model for the combined effect of \code{area} and \code{yearc}.} \label{me_rent2} \end{figure} In the above example, we only considered the mean of the response distribution to vary by \code{area} and \code{yearc}, but this my not necessarily reasonable assumption, as the variation of the response might vary with these variables as well. Accordingly, we fit splines and effects of district for both the location and the scale parameter, which is called \code{sigma} in Gaussian models. \begin{Sinput} bform <- bf(rentsqm ~ t2(area, yearc) + (1|ID1|district), sigma ~ t2(area, yearc) + (1|ID1|district)) fit_rent2 <- brm(bform, data = rent99, chains = 2, cores = 2) \end{Sinput} If not otherwise specified, \code{sigma} is predicted on the log-scale to ensure it is positive no matter how the predictor term looks like. Instead of \code{(1|district)} as in the previous model, we now use \code{(1|ID1|district)} in both formulas. This results in modeling the varying intercepts of both model parts as correlated (see the description of the ID-syntax above). The group-level part of the \code{summary} output looks as follows: \begin{Sinput} Group-Level Effects: ~district (Number of levels: 336) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 0.60 0.06 0.49 0.73 744 1.00 sd(sigma_Intercept) 0.11 0.02 0.06 0.15 751 1.00 cor(Intercept,sigma_Intercept) 0.72 0.17 0.35 0.98 648 1.00 \end{Sinput} As visible from the positive correlation of the intercepts, districts with overall higher rent per square meter have higher variation at the same time. Lastly, we want to turn our attention to the splines. While \code{conditional\_effects} is used to visualize effects of predictors on the expected response, \code{conditional\_smooths} is used to show just the spline parts of the model: \begin{Sinput} conditional_smooths(fit_rent2) \end{Sinput} The plot on the left-hand side of Figure \ref{me_rent3} resembles the one in Figure \ref{me_rent2}, but the scale is different since only the spline is plotted. The right-hand side of \ref{me_rent3} shows the spline for \code{sigma}. 
Since we apply the log-link on \code{sigma} by default the spline is on the log-scale as well. As visible in the plot, the variation in the rent per square meter is highest for relatively small and old apartments, while the variation is smallest for medium to large apartments build around the 1960s. \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_rent3.pdf} \caption{Plots showing the smooth terms of the \code{fit\_rent2} model.} \label{me_rent3} \end{figure} \subsection{Example 3: Insurance loss payments} On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see \url{http://www.magesblog.com/2015/11/loss-developments-via-growth-curves-and.html}). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are for now assumed to be the same across years. We load the data \begin{Sinput} url <- paste0("https://raw.githubusercontent.com/mages/", "diesunddas/master/Data/ClarkTriangle.csv") loss <- read.csv(url) head(loss) \end{Sinput} \begin{Sinput} AY dev cum 1 1991 6 357.848 2 1991 18 1124.788 3 1991 30 1735.330 4 1991 42 2182.708 5 1991 54 2745.596 6 1991 66 3319.994 \end{Sinput} and translate the proposed model into a non-linear \pkg{brms} model. \begin{Sinput} nlform <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE) nlprior <- c(prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta")) fit_loss1 <- brm(formula = nlform, data = loss, family = gaussian(), prior = nlprior, control = list(adapt_delta = 0.9)) \end{Sinput} In the above functions calls, quite a few things are going on. The formulas are wrapped in \code{bf} to combine them into one object. The first formula specifies the non-linear model. We set argument \code{nl = TRUE} so that \pkg{brms} takes this formula literally and instead of using standard R formula parsing. We specify one additional formula per non-linear parameter (a) to clarify what variables are covariates and what are parameters and (b) to specify the predictor term for the parameters. We estimate a group-level effect of accident year (variable \code{AY}) for the ultimate loss \code{ult}. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in the case of \code{ult}, contains only a varying intercept for year. Both \code{omega} and \code{theta} are assumed to be constant across observations so we just fit a population-level intercept. Priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. Otherwise, we may observe that different Markov chains converge to different parameter regions as multiple posterior distribution are equally plausible. Setting prior distributions is a difficult task especially in non-linear models. 
It requires some experience and knowledge both about the model that is being fitted and about the data at hand. Additionally, there is more to keep in mind to optimize the sampler's performance: Firstly, using non- or weakly informative priors in non-linear models often leads to problems even if the model is generally identified. For instance, if a zero-centered and reasonably wide prior such as \code{normal(0, 10000)} it set on \code{ult}, there is little information about \code{theta} and \code{omega} for samples of \code{ult} being close to zero, which may lead to convergence problems. Secondly, Stan works best when parameters are roughly on the same order of magnitude \citep{stan2017}. In the present example, \code{ult} is of three orders larger than \code{omega}. Still, the sampler seems to work quite well, but this may not be true for other models. One solution is to rescale parameters before model fitting. For instance, for the present example, one could have downscaled \code{ult} by replacing it with \code{ult * 1000} and correspondingly the \code{normal(5000, 1000)} prior with \code{normal(5, 1)}. In the \code{control} argument we increase \code{adapt\_delta} to get rid of a few divergent transitions (cf. \citeauthor{stan2017}, \citeyear{stan2017}; \citeauthor{brms1}, \citeyear{brms1}). Again the model is summarized via \begin{Sinput} summary(fit_loss1) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: cum ~ ult * (1 - exp(-(dev / theta)^omega)) ult ~ 1 + (1 | AY) omega ~ 1 theta ~ 1 Data: loss (Number of observations: 55) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~AY (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(ult_Intercept) 745.74 231.31 421.05 1306.04 916 1 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat ult_Intercept 5273.70 292.34 4707.11 5852.28 798 1 omega_Intercept 1.34 0.05 1.24 1.43 2167 1 theta_Intercept 46.07 2.09 42.38 50.57 1896 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 139.93 15.52 113.6 175.33 2358 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} as well as \begin{Sinput} conditional_effects(fit_loss1) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth,keepaspectratio]{me_loss1.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model.} \label{me_loss1} \end{figure} (see Figure \ref{me_loss1}). We can also visualize the cumulative insurance loss over time separately for each year. \begin{Sinput} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_year <- conditional_effects(fit_loss1, conditions = conditions, re_formula = NULL, method = "predict") plot(me_year, ncol = 5, points = TRUE) \end{Sinput} \begin{figure}[ht] \centering \includegraphics[width=0.99\textwidth,keepaspectratio]{me_loss1_year.pdf} \caption{Conditional effects plots of the \code{fit\_loss1} model separately for each accident year.} \label{me_loss1_year} \end{figure} (see Figure \ref{me_loss1_year}). It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. 
Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. In the above model, we considered \code{omega} and \code{delta} to be constant across years, which may not necessarily be true. We can easily investigate this by fitting varying intercepts for all three non-linear parameters also estimating group-level correlation using the above introduced \code{ID} syntax. \begin{Sinput} nlform2 <- bf(cum ~ ult * (1 - exp(-(dev / theta)^omega)), ult ~ 1 + (1|ID1|AY), omega ~ 1 + (1|ID1|AY), theta ~ 1 + (1|ID1|AY), nl = TRUE) fit_loss2 <- update(fit_loss1, formula = nlform2, control = list(adapt_delta = 0.90)) \end{Sinput} We could have also specified all predictor terms more conveniently within one formula as \begin{Sinput} ult + omega + theta ~ 1 + (1|ID1|AY) \end{Sinput} because the structure of the predictor terms is identical. To compare model fit, we perform leave-one-out cross-validation. \begin{Sinput} LOO(fit_loss1, fit_loss2) \end{Sinput} \begin{Sinput} LOOIC SE fit_loss1 715.44 19.24 fit_loss2 720.60 19.85 fit_loss1 - fit_loss2 -5.15 5.34 \end{Sinput} Since smaller values indicate better expected out-of-sample predictions and thus better model fit, the simpler model that only has a varying intercept over parameter \code{ult} is preferred. This may not be overly surprising, given that three varying intercepts as well as three group-level correlations are probably overkill for data containing only 55 observations. Nevertheless, it nicely demonstrates how to apply the \code{ID} syntax in practice. More examples of non-linear models can be found in \code{vignette("brms\_nonlinear")}. \subsection{Example 4: Performance of school children} Suppose that we want to predict the performance of students in the final exams at the end of the year. There are many variables to consider, but one important factor will clearly be school membership. Schools might differ in the ratio of teachers and students, the general quality of teaching, in the cognitive ability of the students they draw, or other factors we are not aware of that induce dependency among students of the same school. Thus, it is advised to apply a multilevel modeling techniques including school membership as a group-level term. Of course, we should account for class membership and other levels of the educational hierarchy as well, but for the purpose of the present example, we will focus on schools only. Usually, accounting for school membership is pretty-straight forward by simply adding a varying intercept to the formula: \code{(1 | school)}. However, a non-negligible number of students might change schools during the year. This would result in a situation where one student is a member of multiple schools and so we need a multi-membership model. Setting up such a model not only requires information on the different schools students attend during the year, but also the amount of time spend at each school. The latter can be used to weight the influence each school has on its students, since more time attending a school will likely result in greater influence. For now, let us assume that students change schools maximally once a year and spend equal time at each school. We will later see how to relax these assumptions. Real educational data are usually relatively large and complex so that we simulate our own data for the purpose of this tutorial paper. We simulate 10 schools and 1000 students, with each school having the same expected number of 100 students. 
We model 10\% of students as changing schools. \begin{Sinput} data_mm <- sim_multi_mem(nschools = 10, nstudents = 1000, change = 0.1) head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.5 0.5 16.27422 2 10 9 0.5 0.5 18.71387 3 5 3 0.5 0.5 23.65319 4 3 5 0.5 0.5 22.35204 5 5 3 0.5 0.5 16.38019 6 10 6 0.5 0.5 17.63494 \end{Sinput} The code of function \code{sim\_multi\_mem} can be found in the online supplement of the present paper. For reasons of better illustration, students changing schools appear in the first rows of the data. Data of students being only at a single school looks as follows: \begin{Sinput} data_mm[101:106, ] \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 101 2 2 0.5 0.5 27.247851 102 9 9 0.5 0.5 24.041427 103 4 4 0.5 0.5 12.575001 104 2 2 0.5 0.5 21.203644 105 4 4 0.5 0.5 12.856166 106 4 4 0.5 0.5 9.740174 \end{Sinput} Thus, school variables are identical, but we still have to specify both in order to pass the data appropriately. Incorporating no other predictors into the model for simplicity, a multi-membership model is specified as \begin{Sinput} fit_mm <- brm(y ~ 1 + (1 | mm(s1, s2)), data = data_mm) \end{Sinput} The only new syntax element is that multiple grouping factors (\code{s1} and \code{s2}) are wrapped in \code{mm}. Everything else remains exactly the same. Note that we did not specify the relative weights of schools for each student and thus, by default, equal weights are assumed. \begin{Sinput} summary(fit_mm) \end{Sinput} \begin{Sinput} Family: gaussian (identity) Formula: y ~ 1 + (1 | mm(s1, s2)) Data: data_mm (Number of observations: 1000) Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1; total post-warmup samples = 4000 WAIC: Not computed Group-Level Effects: ~mms1s2 (Number of levels: 10) Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sd(Intercept) 2.76 0.82 1.69 4.74 682 1.01 Population-Level Effects: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat Intercept 19 0.93 17.06 20.8 610 1 Family Specific Parameters: Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat sigma 3.58 0.08 3.43 3.75 2117 1 Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence, Rhat = 1). \end{Sinput} With regard to the assumptions made in the above example, it is unlikely that all children who change schools stay in both schools equally long. To relax this assumption, we have to specify weights. First, we amend the simulated data to contain non-equal weights for students changing schools. For all other students, weighting does of course not matter as they stay in the same school anyway. \begin{Sinput} data_mm[1:100, "w1"] <- runif(100, 0, 1) data_mm[1:100, "w2"] <- 1 - data_mm[1:100, "w1"] head(data_mm) \end{Sinput} \begin{Sinput} s1 s2 w1 w2 y 1 8 9 0.3403258 0.65967423 16.27422 2 10 9 0.1771435 0.82285652 18.71387 3 5 3 0.9059811 0.09401892 23.65319 4 3 5 0.4432007 0.55679930 22.35204 5 5 3 0.8052026 0.19479738 16.38019 6 10 6 0.5610243 0.43897567 17.63494 \end{Sinput} Incorporating these weights into the model is straight forward. \begin{Sinput} fit_mm2 <- brm(y ~ 1 + (1 | mm(s1, s2, weights = cbind(w1, w2))), data = data_mm) \end{Sinput} The summary output is similar to the previous, so we do not show it here. The second assumption that students change schools only once a year, may also easily be relaxed by providing more than two grouping factors, say, \code{mm(s1, s2, s3)}. 
\section{Comparison between packages} Over the years, many R packages have been developed that implement MLMs, each being more or less general in their supported models. In \cite{brms1}, I compared \pkg{brms} with \pkg{lme4} \citep{bates2015}, \pkg{MCMCglmm} \citep{hadfield2010}, \pkg{rstanarm} \citep{rstanarm2017}, and \pkg{rethinking} \citep{mcelreath2017}. Since then, quite a few new features have been added in particular to \pkg{brms} and \pkg{rstanarm}. Accordingly, in the present paper, I will update these comparisons, but focus on \pkg{brms}, \pkg{rstanarm}, and \pkg{MCMCglmm} as the possibly most important R packages implementing Bayesian MLMs. While \pkg{brms} and \pkg{rstanarm} are both based on the probabilistic programming language \pkg{Stan}, \pkg{MCMCglmm} implements its own custom MCMC algorithm. Modeling options and other important information of these packages are summarized in Table~\ref{comparison} and will be discussed in detail below. Regarding general model classes, all three packages support the most common types such as linear, count data and certain survival models. Currently, \pkg{brms} and \pkg{MCMCglmm} provide more flexibility when modeling categorical and ordinal data. Additionally, they offer support for zero-inflated and hurdle models to account for access zeros in the data (see Example 1 above). For survival / time-to-event models, \pkg{rstanarm} offers great flexibility via the \code{stan\_jm} function, which allows for complex association structures between time-to-event data and one or more models of longitudinal covariates (for details see \url{https://cran.r-project.org/web/packages/rstanarm/vignettes/jm.html}). Model classes currently specific to \pkg{brms} are robust linear models using Student's t-distribution (family \code{student}) as well as response times models via the exponentially modified Gaussian (family \code{exgaussian}) distribution or the Wiener diffusion model (family \code{wiener}). The latter allows to simultaneously model dichotomous decisions and their corresponding response times (for a detailed example see \url{http://singmann.org/wiener-model-analysis-with-brms-part-i/}). All three packages offer many additional modeling options, with \pkg{brms} currently having the greatest flexibility (see Table~\ref{comparison} for a summary). Moreover, the packages differ in the general framework, in which they are implemented: \pkg{brms} and \pkg{MCMCglmm} each come with a single model fitting function (\code{brm} and \code{MCMCglmm} respectively), through which all of their models can be specified. Further, their framework allows to seamlessly combine most modeling options with each other in the same model. In contrast, the approach of \pkg{rstanarm} is to emulate existing functions of other packages. This has the advantage of an easier transition between classical and Bayesian models, since the syntax used to specify models stays the same. However, it comes with the main disadvantage that many modeling options cannot be used in combination within the same model. Information criteria are available in all three packages. The advantages of WAIC and LOO implemented in \pkg{brms} and \pkg{rstanarm}, are their less restrictive assumptions and that their standard errors can be easily estimated to get a better sense of the uncertainty in the criteria. 
Comparing the prior options of the packages, \pkg{brms} offers a little more flexibility than \pkg{MCMCglmm} and \pkg{rstanarm}, as virtually any prior distribution can be applied on population-level effects as well as on the standard deviations of group-level effects. In addition, I believe that the way priors are specified in \pkg{brms} is more intuitive as it is directly evident what prior is actually applied. In \pkg{brms}, Bayes factors are available both via Savage-Dickey ratios \citep{wagenmakers2010} and bridge-sampling \citep{bridgesampling2017}, while \pkg{rstanarm} allows for the latter option. For a detailed comparison with respect to sampling efficiency, see \cite{brms1}. \begin{table}[hbtp] \centering \begin{tabular}{llll} & \parbox{2cm}{\pkg{brms}} & \parbox{2cm}{\pkg{rstanarm}} & \parbox{2cm}{\pkg{MCMCglmm}} \\ \hline \\ [-1.5ex] \parbox{6cm}{\textbf{Model classes}} & & & \\ [1ex] Linear models & yes & yes & yes \\ Robust linear models & yes & no & no \\ Count data models & yes & yes & yes \\ Survival models & yes & yes$^1$ & yes \\ Response times models & yes & no & no \\ Beta models & yes & yes & no \\ Categorical models & yes & yes$^2$ & yes \\ Multinomial models & no & no & yes \\ Ordinal models & various & cumulative$^2$ & cumulative \\ Zero-inflated and hurdle models & yes & no & yes \\ \hline \\ [-1.5ex] \parbox{5cm}{\textbf{Modeling options}} & & & \\ [1ex] Variable link functions & various & various & no \\ Multilevel structures & yes & yes & yes \\ Multi-membership & yes & no & yes \\ Multivariate responses & yes & yes$^3$ & yes \\ Non-linear predictors & yes & limited$^4$ & no \\ Distributional regression & yes & no & no \\ Finite mixtures & yes & no & no \\ Splines (additive models) & yes & yes & yes \\ Gaussian Processes & yes & no & no \\ Temporal autocorrelation & yes & yes$^{2, 5}$ & no \\ Spatial autocorrelation & yes & yes$^{2, 5}$ & no \\ Monotonic effects & yes & no & no \\ Category specific effects & yes & no & no \\ Measurement error & yes & no & no \\ Weights & yes & yes & no \\ Offset & yes & yes & using priors \\ Censored data & yes & yes$^1$ & yes \\ Truncated data & yes & no & no \\ Customized covariances & yes & no & yes \\ Missing value imputation & no & no & no \\ \hline \\ [-1.5ex] \textbf{Bayesian specifics} & & & \\ [1ex] Population-level priors & flexible & flexible & normal \\ Group-level priors & normal & normal & normal \\ Covariance priors & flexible & restricted$^6$ & restricted$^7$ \\ Bayes factors & yes & yes$^8$ & no \\ Parallelization & yes & yes & no \\ \hline \\ [-1.5ex] \textbf{Other} & & & \\ [1ex] Estimator & HMC, NUTS & HMC, NUTS & MH, Gibbs$^9$ \\ Information criterion & WAIC, LOO & WAIC, LOO & DIC \\ C++ compiler required & yes & no & no \\ \hline \end{tabular} \caption{ Comparison of the capabilities of the \pkg{brms}, \pkg{rstanarm} and \pkg{MCMCglmm} packages. Notes: (1) Advanced functionality available via \code{stan\_jm}. (2) No group-level terms and related functionality allowed. (3) Cannot be combined with other modeling options such as splines. (4) Functionality limited to linear Gaussian models and certein pre-specified non-linear functions. (5) Functionality available only on GitHub branches (\url{https://github.com/stan-dev/rstanarm}). (6) For details see \cite{hadfield2010}. (7) For details see \url{https://github.com/stan-dev/rstanarm/wiki/Prior-distributions}. (8) Available via the \pkg{bridgesampling} package \citep{bridgesampling2017}. (9) Estimator consists of a combination of both algorithms. 
} \label{comparison} \end{table} \section{Conclusion} The present paper is meant to introduce R users and developers to the extended \pkg{lme4} formula syntax applied in \pkg{brms}. Only a subset of modeling options were discussed in detail, which ensured the paper was not too broad. For some of the more basic models that \pkg{brms} can fit, see \citet{brms1}. Many more examples can be found in the growing number of vignettes accompanying the package (see \code{vignette(package = "brms")} for an overview). To date, \pkg{brms} is already one of the most flexible R packages when it comes to regression modeling. However, for the future, there are quite a few more features that I am planning to implement (see \url{https://github.com/paul-buerkner/brms/issues} for the current list of issues). In addition to smaller, incremental updates, I have two specific features in mind: (1) latent variables estimated via confirmatory factor analysis and (2) missing value imputation. I receive ideas and suggestions from users almost every day -- for which I am always grateful -- and so the list of features that will be implemented in the proceeding versions of \pkg{brms} will continue to grow. \section*{Acknowledgments} First of all, I would like to thank the Stan Development Team for creating the probabilistic programming language Stan, which is an incredibly powerful and flexible tool for performing full Bayesian inference. Without it, \pkg{brms} could not fit a single model. Furthermore, I want to thank Heinz Holling, Donald Williams and Ruben Arslan for valuable comments on earlier versions of the paper. I also would like to thank the many users who reported bugs or had ideas for new features, thus helping to continuously improve \pkg{brms}. \bibliography{citations_multilevel} \end{document} brms/inst/doc/brms_monotonic.Rmd0000644000175000017500000002075314111751670016577 0ustar nileshnilesh--- title: "Estimating Monotonic Effects with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Monotonic Effects with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter, $b$, takes care of the direction and size of the effect similar to an ordinary regression parameter. 
If the monotonic effect is used in a linear model, $b$ can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, $\zeta$, estimates the normalized distances between consecutive predictor categories which thus defines the shape of the monotonic effect. For a single monotonic predictor, $x$, the linear predictor term of observation $n$ looks as follows: $$\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i$$ The parameter $b$ can take on any real value, while $\zeta$ is a simplex, which means that it satisfies $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$ with $D$ being the number of elements of $\zeta$. Equivalently, $D$ is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation. ## A Simple Monotonic Model A main application of monotonic effects is ordinal predictors that can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship of yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for the exact income. Instead, they are asked to rank themselves in one of certain classes, say: 'below 20k', 'between 20k and 40k', 'between 40k and 100k' and 'above 100k'. We use some simulated data for illustration purposes. ```{r} income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100") income <- factor(sample(income_options, 100, TRUE), levels = income_options, ordered = TRUE) mean_ls <- c(30, 60, 70, 75) ls <- mean_ls[income] + rnorm(100, sd = 7) dat <- data.frame(income, ls) ``` We now proceed with analyzing the data modeling `income` as a monotonic effect. ```{r, results='hide'} fit1 <- brm(ls ~ mo(income), data = dat) ``` The summary methods yield ```{r} summary(fit1) plot(fit1, variable = "simo", regex = TRUE) plot(conditional_effects(fit1)) ``` The distributions of the simplex parameter of `income`, as shown in the `plot` method, demonstrate that the largest difference (about 70% of the difference between minimum and maximum category) is between the first two categories. Now, let's compare the monotonic model with two common alternative models. (a) Assume `income` to be continuous: ```{r, results='hide'} dat$income_num <- as.numeric(dat$income) fit2 <- brm(ls ~ income_num, data = dat) ``` ```{r} summary(fit2) ``` or (b) Assume `income` to be an unordered factor: ```{r, results='hide'} contrasts(dat$income) <- contr.treatment(4) fit3 <- brm(ls ~ income, data = dat) ``` ```{r} summary(fit3) ``` We can easily compare the fit of the three models using leave-one-out cross-validation. ```{r} loo(fit1, fit2, fit3) ``` The monotonic model fits better than the continuous model, which is not surprising given that the relationship between `income` and `ls` is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets. ## Setting Prior Distributions In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or formulated correctly, had the same prior distribution. 
In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., $\zeta_i \in [0,1]$ and $\sum_{i = 1}^D \zeta_i = 1$) and zero otherwise. The Dirichlet prior has a single parameter $\alpha$ of the same length as $\zeta$. The higher $\alpha_i$ the higher the a-priori probability of higher values of $\zeta_i$. Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into a higher a-priori values of $\zeta_1$ (difference between 'below_20' and '20_to_40') and hence into higher values of $\alpha_1$. We choose $\alpha_1 = 2$ and $\alpha_2 = \alpha_3 = 1$, the latter being the default value of $\alpha$. To fit the model we write: ```{r, results='hide'} prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1") fit4 <- brm(ls ~ mo(income), data = dat, prior = prior4, sample_prior = TRUE) ``` The `1` at the end of `"moincome1"` may appear strange when first working with monotonic effects. However, it is necessary as one monotonic term may be associated with multiple simplex parameters, if interactions of multiple monotonic variables are included in the model. ```{r} summary(fit4) ``` We have used `sample_prior = TRUE` to also obtain draws from the prior distribution of `simo_moincome1` so that we can visualized it. ```{r} plot(fit4, variable = "prior_simo", regex = TRUE, N = 3) ``` As is visible in the plots, `simo_moincome1[1]` was a-priori on average twice as high as `simo_moincome1[2]` and `simo_moincome1[3]` as a result of setting $\alpha_1$ to 2. ## Modeling interactions of monotonic variables Suppose, we have additionally asked participants for their age. ```{r} dat$age <- rnorm(100, mean = 40, sd = 10) ``` We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the `*` operator: ```{r, results='hide'} fit5 <- brm(ls ~ mo(income)*age, data = dat) ``` ```{r} summary(fit5) conditional_effects(fit5, "income:age") ``` ## Modelling Monotonic Group-Level Effects Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. Thus, we add an identifier for `city` to the data and add some city-related variation to `ls`. ```{r} dat$city <- rep(1:10, each = 10) var_city <- rnorm(10, sd = 10) dat$ls <- dat$ls + var_city[dat$city] ``` With the following code, we fit a multilevel model assuming the intercept and the effect of `income` to vary by city: ```{r, results='hide'} fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat) ``` ```{r} summary(fit6) ``` reveals that the effect of `income` varies only little across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed `income` to have the same effect across cities. ## References Bürkner P. C. & Charpentier, E. (in review). [Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models](https://psyarxiv.com/9qkhj/). *PsyArXiv preprint*. brms/inst/doc/brms_distreg.html0000644000175000017500000054651614146736036016476 0ustar nileshnilesh Estimating Distributional Models with brms

Estimating Distributional Models with brms

Paul Bürkner

2021-11-22

Introduction

This vignette provides an introduction to fitting distributional regression models with brms. We use the term distributional model to refer to a model in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters assuming them to be constant across observations. This assumption is so common that most researchers applying regression models are often (in my experience) not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increases model complexity and thus makes models harder to fit. Fortunately, brms uses Stan on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models, so that model complexity is much less of an issue.

Suppose we have a normally distributed response variable. Then, in basic linear regression, we specify a predictor term \(\eta_{\mu}\) for the mean parameter \(\mu\) of the normal distribution. The second parameter of the normal distribution – the residual standard deviation \(\sigma\) – is assumed to be constant across observations. We estimate \(\sigma\) but do not try to predict it. In a distributional model, however, we do exactly this by specifying a predictor term \(\eta_{\sigma}\) for \(\sigma\) in addition to the predictor term \(\eta_{\mu}\). Ignoring group-level effects for the moment, the linear predictor of a parameter \(\theta\) for observation \(n\) has the form

\[\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}\] where \(x_{\theta i n}\) denotes the value of the \(i\)th predictor of parameter \(\theta\) for observation \(n\) and \(b_{\theta i}\) is the \(i\)th regression coefficient of parameter \(\theta\). A distributional normal model with response variable \(y\) can then be written as

\[y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)\] We used the exponential function around \(\eta_{\sigma}\) to reflect that \(\sigma\) constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number.
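
In brms, such a model is specified by combining one formula per predicted parameter via bf(); the log link for \(\sigma\) shown above is then applied automatically. The following is only a minimal, hypothetical sketch of this syntax, where y, x, and dat are placeholder names rather than objects used later in this vignette:

# minimal sketch of a distributional normal model (placeholder names):
# the mean and the residual standard deviation both depend on x
fit <- brm(bf(y ~ x, sigma ~ x), data = dat, family = gaussian())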

A simple distributional model

Unequal variance models are possibly the simplest, but nevertheless very important, application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. For simplicity, assume that we only investigate the post-treatment values.

group <- rep(c("treat", "placebo"), each = 30)
symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1))
dat1 <- data.frame(group, symptom_post)
head(dat1)
  group symptom_post
1 treat    0.7494321
2 treat    4.6104747
3 treat    0.2906019
4 treat   -0.4612407
5 treat    0.6131628
6 treat    0.4834311

The following model estimates the effect of group on both the mean and the residual standard deviation of the normal response distribution.

fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), 
            data = dat1, family = gaussian())

Useful summary statistics and plots can be obtained via

summary(fit1)
plot(fit1, N = 2, ask = FALSE)

plot(conditional_effects(fit1), points = TRUE)

The population-level effect sigma_grouptreat, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the conditional_effects of group. Going one step further, we can compute the residual standard deviations on the original scale using the hypothesis method.

hyp <- c("exp(sigma_Intercept) = 0",
         "exp(sigma_Intercept + sigma_grouptreat) = 0")
hypothesis(fit1, hyp)
Hypothesis Tests for class b:
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (exp(sigma_Interc... = 0     1.12      0.16     0.87     1.47         NA        NA    *
2 (exp(sigma_Interc... = 0     1.93      0.26     1.51     2.52         NA        NA    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

We may also directly compare them and plot the posterior distribution of their difference.

hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)"
(hyp <- hypothesis(fit1, hyp))
Hypothesis Tests for class b:
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (exp(sigma_Interc... > 0     0.81       0.3     0.34     1.34     665.67         1    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp, chars = NULL)

Indeed, the residual standard deviation of the treatment group seems to be larger than that of the placebo group. Moreover, the magnitude of this difference is pretty similar to what we expected given the values we put into the data simulations.

Zero-Inflated Models

Another important application of the distributional regression framework is so-called zero-inflated models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge number of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website, the data are described as follows: “The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish.”

zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv")
head(zinb)
  nofish livebait camper persons child         xb         zg count
1      1        0      0       1     0 -0.8963146  3.0504048     0
2      0        1      1       1     0 -0.5583450  1.7461489     0
3      0        1      0       1     0 -0.4017310  0.2799389     0
4      0        1      1       2     1 -0.9562981 -0.6015257     0
5      0        1      0       1     0  0.4368910  0.5277091     1
6      0        1      1       4     2  1.3944855 -0.7075348     0

As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations.

fit_zinb1 <- brm(count ~ persons + child + camper, 
                 data = zinb, family = zero_inflated_poisson())

Again, we summarize the results using the usual methods.

summary(fit_zinb1)
 Family: zero_inflated_poisson 
  Links: mu = log; zi = identity 
Formula: count ~ persons + child + camper 
   Data: zinb (Number of observations: 250) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.01      0.18    -1.37    -0.68 1.00     2796     2890
persons       0.87      0.05     0.79     0.97 1.00     2866     2708
child        -1.37      0.09    -1.55    -1.19 1.00     2679     2717
camper        0.80      0.09     0.62     0.99 1.00     3175     2259

Family Specific Parameters: 
   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
zi     0.41      0.04     0.32     0.49 1.00     3031     2507

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_zinb1), ask = FALSE)

According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch fewer fish. The zero-inflation probability zi is pretty large, with a mean of 41%. Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-inflation). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here).
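
As a rough, hypothetical illustration of this point (not part of the original analysis), we can combine both model parts using the posterior means shown above to approximate the probability of catching no fish for a single-person group without children that is not camping:

zi <- 0.41                       # estimated zero-inflation probability from the summary above
lambda <- exp(-1.01 + 0.87 * 1)  # Poisson mean implied by the population-level estimates
zi + (1 - zi) * dpois(0, lambda) # P(no fish) is roughly 0.66, clearly above zi alone

A proper analysis would average this quantity over the posterior draws (e.g., via posterior_predict), but even this simple calculation shows why the probability of catching no fish exceeds the zero-inflation probability itself.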

Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data.

fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), 
                 data = zinb, family = zero_inflated_poisson())
summary(fit_zinb2)
 Family: zero_inflated_poisson 
  Links: mu = log; zi = logit 
Formula: count ~ persons + child + camper 
         zi ~ child
   Data: zinb (Number of observations: 250) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       -1.08      0.18    -1.44    -0.74 1.00     2863     3006
zi_Intercept    -0.96      0.26    -1.48    -0.48 1.00     3613     2910
persons          0.89      0.05     0.80     0.98 1.00     2688     2945
child           -1.18      0.09    -1.37    -0.99 1.00     2568     2241
camper           0.78      0.10     0.60     0.97 1.00     3735     2754
zi_child         1.22      0.28     0.69     1.77 1.00     3457     2755

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_zinb2), ask = FALSE)

To transform the linear predictor of zi into a probability, brms applies the logit-link:

\[logit(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}\]

The logit-link takes values within \([0, 1]\) and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors.
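
For instance, a quick sketch (using the posterior means of zi_Intercept and zi_child from the summary above; exact values vary between runs) applies the inverse logit via base R's plogis() to recover the zero-inflation probabilities:

plogis(-0.96)        # zero-inflation probability for groups without children, about 0.28
plogis(-0.96 + 1.22) # for groups with one child, about 0.56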

According to the model, trying to fish with children not only decreases the overall number of fish caught (as implied by the Poisson part of the model) but also drastically increases your chance of catching no fish at all (as implied by the zero-inflation part), most likely because groups with more children are not even trying.

Additive Distributional Models

In the examples so far, we did not have multilevel data and thus did not fully use the capabilities of the distributional regression framework of brms. In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have only a vague idea, if any, of what the relationship between a predictor and the response looks like. A very flexible approach to tackle this problem is to use splines and let them figure out the form of the relationship. For illustration purposes, we simulate some data with the mgcv package, which is also used in brms to prepare smooth terms.

dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE)
Gu & Wahba 4 term additive model
head(dat_smooth[, 1:6])
         y         x0        x1        x2         x3        f
1 14.49903 0.90367316 0.5599891 0.2892207 0.25739676 14.65363
2 15.62866 0.05106966 0.3067924 0.2004563 0.46890431 16.76730
3 18.96673 0.41930344 0.8640141 0.5152081 0.49768229 19.30891
4 18.00491 0.88620754 0.1923437 0.6606654 0.01195286 17.42330
5 11.57063 0.47699339 0.2445614 0.3427586 0.77137988 12.80572
6 14.70050 0.24687260 0.7712136 0.7318031 0.96783381 14.48305

The data contains the predictors x0 to x3 as well as the grouping factor fac indicating the nested structure of the data. We predict the response variable y using smooth terms of x1 and x2 and a varying intercept of fac. In addition, we assume the residual standard deviation sigma to vary by a smoothing term of x0 and a varying intercept of fac.

fit_smooth1 <- brm(
  bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)),
  data = dat_smooth, family = gaussian(),
  chains = 2, control = list(adapt_delta = 0.95)
)
summary(fit_smooth1)
 Family: gaussian 
  Links: mu = identity; sigma = log 
Formula: y ~ s(x1) + s(x2) + (1 | fac) 
         sigma ~ s(x0) + (1 | fac)
   Data: dat_smooth (Number of observations: 200) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Smooth Terms: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sds(sx1_1)           2.81      2.10     0.31     8.22 1.00      808      902
sds(sx2_1)          19.35      4.78    12.06    30.65 1.00      911     1431
sds(sigma_sx0_1)     1.09      0.95     0.05     3.58 1.00      641      780

Group-Level Effects: 
~fac (Number of levels: 4) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)           5.09      2.36     2.36    11.25 1.00     1006     1100
sd(sigma_Intercept)     0.12      0.19     0.00     0.54 1.01      555      732

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept          15.32      2.33    10.69    20.26 1.00      746      905
sigma_Intercept     0.78      0.10     0.59     0.98 1.01     1027      717
sx1_1              10.25      6.01    -3.87    21.54 1.00     1095      951
sx2_1              63.62     15.84    32.24    94.36 1.01     1262      856
sigma_sx0_1         1.70      2.17    -2.41     6.82 1.00     1063     1180

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE)

This model is likely overkill for the data at hand, but it nicely demonstrates the ease with which one can specify complex models with brms and fit them using Stan on the backend.

brms/inst/doc/brms_phylogenetics.Rmd0000644000175000017500000003004014010776135017436 0ustar nileshnilesh--- title: "Estimating Phylogenetic Multilevel Models with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Estimating Phylogenetic Multilevel Models with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction In the present vignette, we want to discuss how to specify phylogenetic multilevel models using **brms**. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. The usual approach would be to model species as a grouping factor in a multilevel model and estimate varying intercepts (and possibly also varying slopes) over species. However, species are not independent as they come from the same phylogenetic tree and we thus have to adjust our model to incorporate this dependency. The examples discussed here are from chapter 11 of the book *Modern Phylogenetic Comparative Methods and the application in Evolutionary Biology* (de Villemeruil & Nakagawa, 2014). The necessary data can be downloaded from the corresponding website (http://www.mpcm-evolution.com/). Some of these models may take a few minutes to fit. ## A Simple Phylogenetic Model Assume we have measurements of a phenotype, `phen` (say the body size), and a `cofactor` variable (say the temperature of the environment). We prepare the data using the following code. ```{r} phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex") data_simple <- read.table( "https://paul-buerkner.github.io/data/data_simple.txt", header = TRUE ) head(data_simple) ``` The `phylo` object contains information on the relationship between species. Using this information, we can construct a covariance matrix of species (Hadfield & Nakagawa, 2010). ```{r} A <- ape::vcv.phylo(phylo) ``` Now we are ready to fit our first phylogenetic multilevel model: ```{r, results='hide'} model_simple <- brm( phen ~ cofactor + (1|gr(phylo, cov = A)), data = data_simple, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "b"), prior(normal(0, 50), "Intercept"), prior(student_t(3, 0, 20), "sd"), prior(student_t(3, 0, 20), "sigma") ) ) ``` With the exception of `(1|gr(phylo, cov = A))` instead of `(1|phylo)` this is a basic multilevel model with a varying intercept over species (`phylo` is an indicator of species in this data set). However, by using `cov = A` in the `gr` function, we make sure that species are correlated as specified by the covariance matrix `A`. We pass `A` itself via the `data2` argument which can be used for any kinds of data that does not fit into the regular structure of the `data` argument. Setting priors is not required for achieving good convergence for this model, but it improves sampling speed a bit. After fitting, the results can be investigated in detail. 
```{r} summary(model_simple) plot(model_simple, N = 2, ask = FALSE) plot(conditional_effects(model_simple), points = TRUE) ``` The so-called phylogenetic signal (often symbolized by $\lambda$) can be computed with the `hypothesis` method and is roughly $\lambda = 0.7$ for this example. ```{r} hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0" (hyp <- hypothesis(model_simple, hyp, class = NULL)) plot(hyp) ``` Note that the phylogenetic signal is just a synonym of the intra-class correlation (ICC) used in the context of phylogenetic analysis. ## A Phylogenetic Model with Repeated Measurements Often, we have multiple observations per species and this allows us to fit more complicated phylogenetic models. ```{r} data_repeat <- read.table( "https://paul-buerkner.github.io/data/data_repeat.txt", header = TRUE ) data_repeat$spec_mean_cf <- with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo]) head(data_repeat) ``` The variable `spec_mean_cf` just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows: ```{r, results='hide'} model_repeat1 <- brm( phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), data = data_repeat, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0,10), "b"), prior(normal(0,50), "Intercept"), prior(student_t(3,0,20), "sd"), prior(student_t(3,0,20), "sigma") ), sample_prior = TRUE, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The variables `phylo` and `species` are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for `phylo` and thus the `species` variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again, we can obtain model summaries as well as estimates of the phylogenetic signal. ```{r} summary(model_repeat1) ``` ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat1, hyp, class = NULL)) plot(hyp) ``` So far, we have completely ignored the variability of the cofactor within species. To incorporate this into the model, we define ```{r} data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf ``` and then fit it again using `within_spec_cf` as an additional predictor. ```{r, results='hide'} model_repeat2 <- update( model_repeat1, formula = ~ . + within_spec_cf, newdata = data_repeat, chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of `cofactor`. ```{r} summary(model_repeat2) ``` Also, the phylogenetic signal remains more or less the same. ```{r} hyp <- paste( "sd_phylo__Intercept^2 /", "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0" ) (hyp <- hypothesis(model_repeat2, hyp, class = NULL)) ``` ## A Phylogenetic Meta-Analysis Let's say we have Fisher's z-transformed correlation coefficients $Zr$ per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success): ```{r} data_fisher <- read.table( "https://paul-buerkner.github.io/data/data_effect.txt", header = TRUE ) data_fisher$obs <- 1:nrow(data_fisher) head(data_fisher) ``` We assume the sampling variance to be known and equal to $V(Zr) = \frac{1}{N - 3}$ for Fisher's $Zr$ values, where $N$ is the sample size per species. 
Incorporating the known sampling variance into the model is straightforward. One has to keep in mind, though, that **brms** requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of `obs` represents the residual variance, which we have to model explicitly in a meta-analytic model. ```{r, results='hide'} model_fisher <- brm( Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), data = data_fisher, family = gaussian(), data2 = list(A = A), prior = c( prior(normal(0, 10), "Intercept"), prior(student_t(3, 0, 10), "sd") ), control = list(adapt_delta = 0.95), chains = 2, cores = 2, iter = 4000, warmup = 1000 ) ``` A summary of the fitted model is obtained via ```{r} summary(model_fisher) plot(model_fisher) ``` The meta-analytic mean (i.e., the model intercept) is $0.16$ with a credible interval of $[0.08, 0.25]$. Thus the mean correlation across species is positive according to the model. ## A phylogenetic count-data model Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example. ```{r} data_pois <- read.table( "https://paul-buerkner.github.io/data/data_pois.txt", header = TRUE ) data_pois$obs <- 1:nrow(data_pois) head(data_pois) ``` As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of `obs` (e.g., see Lawless, 1987). ```{r, results='hide'} model_pois <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), data = data_pois, family = poisson("log"), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` Again, we obtain a summary of the fitted model via ```{r} summary(model_pois) plot(conditional_effects(model_pois), points = TRUE) ``` Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead. ```{r, results='hide'} model_normal <- brm( phen_pois ~ cofactor + (1|gr(phylo, cov = A)), data = data_pois, family = gaussian(), data2 = list(A = A), chains = 2, cores = 2, iter = 4000, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(model_normal) ``` We see that `cofactor` has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks. ```{r} pp_check(model_pois) pp_check(model_normal) ``` Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal model fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit. ```{r} loo(model_pois, model_normal) ``` Since smaller values of loo indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. 
For instance, you could use a negative binomial model (via family `negative_binomial`), which already contains an overdispersion parameter so that modeling a varying intercept of `obs` becomes obsolete. ## Phylogenetic models with multiple group-level effects In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. In **brms**, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. This will be very slow especially when the grouping factors have many levels and matrices are thus large. ## References de Villemeruil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: *Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice* (ed. Garamszegi L.) Springer, New York. pp. 287-303. Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. *Journal of Evolutionary Biology*. 23. 494-508. Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. *Canadian Journal of Statistics*, 15(3), 209-225. brms/inst/doc/brms_threading.Rmd0000644000175000017500000005721714105230573016541 0ustar nileshnilesh--- title: "Running brms models with within-chain parallelization" author: "Sebastian Weber & Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Running brms models with within-chain parallelization} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(ggplot2) library(brms) theme_set(theme_default()) ``` ```{r, fake-data-sim, include=FALSE, eval=TRUE} set.seed(54647) # number of observations N <- 1E4 # number of group levels G <- round(N / 10) # number of predictors P <- 3 # regression coefficients beta <- rnorm(P) # sampled covariates, group means and fake data fake <- matrix(rnorm(N * P), ncol = P) dimnames(fake) <- list(NULL, paste0("x", 1:P)) # fixed effect part and sampled group membership fake <- transform( as.data.frame(fake), theta = fake %*% beta, g = sample.int(G, N, replace=TRUE) ) # add random intercept by group fake <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g") # linear predictor fake <- transform(fake, mu = theta + eta) # sample Poisson data fake <- transform(fake, y = rpois(N, exp(mu))) # shuffle order of data rows to ensure even distribution of computational effort fake <- fake[sample.int(N, N),] # drop not needed row names rownames(fake) <- NULL ``` ```{r, model-poisson, include=FALSE} model_poisson <- brm( y ~ 1 + x1 + x2 + (1 | g), data = fake, family = poisson(), iter = 500, # short sampling to speedup example chains = 2, prior = prior(normal(0,1), class = b) + prior(constant(1), class = sd, group = g), backend = "cmdstanr", threads = threading(4) ) ``` ```{r, benchmark, include=FALSE} # Benchmarks given model with cross-product of 
tuning parameters CPU # cores, grainsize and iterations. Models are run with either static # or non-static scheduler and inits is set by default to 0 on the # unconstrained scale. Function returns a data-frame with the # cross-product of the tuning parameters and as result column the # respective runtime. benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, static = FALSE) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) scaling_model <- update( model, refresh = 0, threads = threading(1, grainsize = grainsize[1], static = static), chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark <- function(cores, size, iter) { bench_fit <- update( scaling_model, warmup=0, iter = iter, chains = 1, seed = 1234, inits = init, refresh = 0, save_warmup=TRUE, threads = threading(cores, grainsize = size, static = static), inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter) res <- with(cases, mapply(run_benchmark, cores, grainsize, iter)) cbind(cases, as.data.frame(t(res))) } benchmark_reference <- function(model, iter=100, inits=0) { winfo <- extract_warmup_info(model) sims <- rstan::extract(model$fit) init <- list(extract_draw(sims, 1)) ref_model <- update( model, refresh = 0, threads = NULL, chains = 1, iter = 2, backend = "cmdstanr" ) run_benchmark_ref <- function(iter_bench) { bench_fit <- update( ref_model, warmup=0, iter = iter_bench, chains = 1, seed = 1234, inits = init, refresh = 0, inv_metric=winfo$inv_metric[[1]], step_size=winfo$step_size[[1]], adapt_engaged=FALSE ) lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value) elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit))) c(num_leapfrog=lf, runtime=elapsed) } ref <- sapply(iter, run_benchmark_ref) ref <- cbind(as.data.frame(t(ref)), iter=iter) ref } extract_warmup_info <- function(bfit) { adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split="\\n") step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2])) inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]])) list(step_size=step_size, inv_metric=inv_metric) } extract_draw <- function(sims, draw) lapply(sims, abind::asub, idx=draw, dim=1, drop=FALSE) ``` ## Introduction Full Bayesian inference is a computationally very demanding task and often we wish to run our models faster in shorter walltime. With modern computers we nowadays have multiple processors available on a given machine such that the use of running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward by merely launching multiple chains at the same time, the use of within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with **brms**, since its efficient use depends on various aspects specific to the users model. 
## Quick summary Assuming you have a **brms** model which you wish to evaluate faster by using more cores per chain, for example: ```{r, eval=FALSE} fit_serial <- brm( count ~ zAge + zBase * Trt + (1|patient), data = epilepsy, family = poisson(), chains = 4, cores = 4, backend = "cmdstanr" ) ``` Then running this model with threading requires `cmdstanr` as backend and you can simply add threading support to an existing model with the `update` mechanism as: ```{r, eval=FALSE} fit_parallel <- update( fit_serial, chains = 2, cores = 2, backend = "cmdstanr", threads = threading(2) ) ``` The example above assumes that 4 cores are available which are best used without within-chain parallelization by running 4 chains in parallel. When using within-chain parallelization it is still advisable to use just as many threads *in total* as you have CPU cores. It's thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. Obviously, this will reduce the number of iterations in the posterior here as we assumed a fixed number of 4 cores. - Only apply within-chain parallelization to large problems which take at least a few minutes to calculate. The `epilepsy` example above is actually too small to gain in speed (just a few seconds per chain on this machine). - Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis. - Due to details of the model and data-set, speedups with more cores can be very limited. Not every model is amenable to within-chain parallelization and an empirical evaluation is in some cases advisable. - Enabling threading *usually* slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed. - Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores. - Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive $\log\Gamma$ functions whereas the normal likelihood is very cheap to calculate in comparison. - Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel. - With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable. - Avoid using hyper-threading, that is, only use as many threads as you have physical cores available. - Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort. ## Within-chain parallelization The within-chain parallelization implemented in **brms** is based on the `reduce_sum` facility in Stan. The basic principle that `reduce_sum` uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. **brms** leverages `reduce_sum` to evaluate the log-likelihood of the model in parallel as for example $$ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). 
\end{aligned} $$ As a consequence, the within-chain parallelization requires mutually independent log-likelihood terms which restricts its applicability to some degree. Furthermore, the within-chain parallelization is only applicable to the evaluation of the data likelihood while all other parts of the model, for example priors, will remain running serially. Thus, only a fraction of the entire Stan model will run in parallel, which limits the potential speedup one may obtain. The theoretical speedup of a program that runs only partially in parallel is described by [Amdahl's law](https://en.wikipedia.org/wiki/Amdahl%27s_law). For example, with 90% of the computational load running in parallel one can essentially double the execution speed with 2 cores while 8 cores may only speed up the program by at most 5x. How large the computational cost of the log-likelihood is in relation to the entire model is very dependent on the model of the user. In practice, the speedups are even smaller than the theoretical speedups. This is caused by the additional overhead implied by forming multiple smaller sums than just one large one. For example, for each partial sum formed the entire parameter vector $\theta$ has to be copied in memory for Stan to be able to calculate the gradient of the log-likelihood. Hence, with more partial sums, more copying is necessary as opposed to evaluating just one large sum. Whether the additional copying is indeed relevant depends on the computational cost of the log-likelihood of each term and the number of parameters. For a model with a computationally cheap normal log-likelihood, this effect is more important than for a model with a Poisson log-likelihood, and for multilevel models with many parameters more copying is needed than for simpler regression models. It may therefore be necessary to form sufficiently large partial sums to warrant an efficient parallel execution. The size of the partial sums is referred to as the `grainsize`, which is set to a reasonable default value. However, for some models this tuning parameter requires some attention from the user for optimal performance. Finally, it is important to note that by default the exact size and order of the partial sums is not stable as it is adjusted to the load of the system. As a result, exact numerical reproducibility is not guaranteed by default. In order to warrant the same size and order of the partial sums, the `static` option must be used and set to `TRUE`, which uses a deterministic scheduler for the parallel work. ## Example model As a toy demonstration, we use here a multilevel Poisson model. The model is a varying intercept model with $`r N`$ data observations which are grouped into $`r G`$ groups. Each data item has $`r P`$ continuous covariates. The simulation code for the fake data can be found in the appendix and its first $10$ rows are: ```{r} kable(head(fake, 10), digits = 3) ``` The **brms** model fitting this data is: ```{r, eval=FALSE} <<model-poisson>> ``` Here we have fixed the standard deviation of the between-group variation for the intercept to the true value of $1$ as used in the simulation. This is to avoid unfavorable geometry of the problem allowing us to concentrate on computational aspects alone. The Poisson likelihood is a relatively expensive likelihood due to the use of the $\log\Gamma$ function as opposed to, for example, a normal likelihood which involves far less expensive operations. 
Moreover, this example is chosen in order to demonstrate parallelization overhead implied by a large number of parameters. ## Managing parallelization overhead As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller *partial sums*. Creating more *partial sums* allows to increase simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each *partial sum* formed along with further overhead due to splitting up a single large task into multiple smaller ones. By default, **brms** will choose a sensible `grainsize` which defines how large a given *partial sum* will roughly be. The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler is giving fully deterministic results since the chunk size and order of partial sums will be the same during sampling. While we expect that the default `grainsize` in **brms** is reasonably good for many models, it can improve performance if one tunes the `grainsize` specifically to a given model and data-set. We suggest to increase successively the number of chunks a given data set is split into with the static scheduler and run this on a single core. This way one can control the number of *partial sum* accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers as we are not interested in the statistical results, but rather aim to be able to explore the tuning parameter space of the chunk size as quickly as possible. The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details and the easiest way to determine this is to run this benchmark with multiple number of iterations. Whenever their results match approximately, then the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix). Below is an example R code demonstrating such a benchmark. The utility function `benchmark_threading` is shown and explained in the appendix. 
```{r, chunking-scale, message=FALSE, warning=FALSE, results='hide'} chunking_bench <- transform( data.frame(chunks = 4^(0:3)), grainsize = ceiling(N / chunks) ) iter_test <- c(10, 20, 40) # very short test runs scaling_chunking <- benchmark_threading( model_poisson, cores = 1, grainsize = chunking_bench$grainsize, # test various grainsizes iter = iter_test, static = TRUE # with static partitioner ) # run as reference the model *without* reduce_sum ref <- benchmark_reference(model_poisson, iter_test) # for additional data munging please refer to the appendix ``` ```{r, munge-chunking-scaling, include=FALSE} scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize") single_chunk <- transform( subset(scaling_chunking, chunks == 1), num_leapfrog_single = num_leapfrog, num_leapfrog = NULL, runtime_single = runtime, runtime = NULL, grainsize = NULL, chunks=NULL ) scaling_chunking <- transform( merge(scaling_chunking, single_chunk), slowdown = runtime/runtime_single, iter = factor(iter), runtime_single = NULL ) ref <- transform(ref, iter=factor(iter)) ``` Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increasing further with more chunks. For models without many parameters, no such overhead should be observed. Furthermore, one can see that 25 and 50 iterations give similar results implying that 25 iterations suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that when we start to use more CPU cores, the overhead is easily offset, but it limits the maximal speedup we can get. For example, some 2 units of computation become 2.4 units due to the overhead such that on 2 cores we don't quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup. Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program *without* `reduce_sum`. As we can see, the additional overhead due to merely enabling `reduce_sum` is substantial in this example. This is attributed in the specific example to the large number of random effects. ```{r} ggplot(scaling_chunking) + aes(chunks, slowdown, colour = iter, shape = iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) + ggtitle("Slowdown with increasing number of chunks") ggplot(scaling_chunking) + aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_chunking$chunks) + scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) + geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) + ggtitle("Time per leapfrog step vs number of chunks", "Dashed line is reference model without reduce_sum") + ylab("Time per leapfrog step [ms]") ``` ## Parallelization speedup In practice, we are often interested in so-called "hard-scaling" properties of the parallelization system. That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it's not useful). 
As we have seen before, the `grainsize` can have an impact on the performance and is as such a tuning parameter. Below we demonstrate some exemplary R code which runs a benchmark with varying number of CPU cores and varying number of `grainsize`s. ```{r, speedup-scale, message=FALSE, warning=FALSE, results='hide'} num_cpu <- parallel::detectCores(logical = FALSE) num_cpu_logical <- parallel::detectCores(logical = TRUE) grainsize_default <- ceiling(N / (2 * num_cpu)) cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical) cores <- sort(unique(cores)) grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4) grainsize <- round(grainsize) iter_scaling <- 20 scaling_cores <- benchmark_threading( model_poisson, cores = cores, grainsize = grainsize, iter = iter_scaling, static = FALSE ) single_core <- transform( subset(scaling_cores, cores == 1), runtime_single = runtime, num_leapfrog=NULL, runtime=NULL, cores = NULL ) scaling_cores <- transform( merge(scaling_cores, single_core), speedup = runtime_single/runtime, grainsize = factor(grainsize) ) ``` It is important to consider the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single core runtime is very slow in which case speed gains on more CPUs may look overly good. Considering instead the absolute runtime avoids this problem. After all, we are interested in the shortest walltime we can get rather than any relative speedups. ```{r} ggplot(scaling_cores) + aes(cores, runtime, shape = grainsize, color = grainsize) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks = scaling_cores$cores) + scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) + theme(legend.position = c(0.85, 0.8)) + geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) + ggtitle("Runtime with varying number of cores", "Dashed line is reference model without reduce_sum") ggplot(scaling_cores) + aes(cores, speedup, shape = grainsize, color = grainsize) + geom_abline(slope = 1, intercept = 0, linetype = 2) + geom_vline(xintercept = num_cpu, linetype = 3) + geom_line() + geom_point() + scale_x_log10(breaks=scaling_cores$cores) + scale_y_log10(breaks=scaling_cores$cores) + theme(aspect.ratio = 1) + coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) + ggtitle("Relative speedup vs 1 core") ``` The vertical dotted line marks the physical number of CPU cores on the machine this was run. The horizontal dashed line in the plot with absolute runtime marks the respective runtime of the model *without* `reduce_sum` and the dashed unity line in the plot with the relative speedup marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count to be greater than the number of physical CPUs. Hence, the use of hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, the use of threading outperforms the single core runtime only when using more than 4 cores in this example. For this example, the shown `grainsize`s matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive in this case, which is attributed in this case to the large number of parameters relative to the number of observations. However, we can still outperform the single core runtime when using many cores. 
The most important advantage of threading, though, is that with an increasing data set size, the user has the option to use a brute-force approach to balance the increase in walltime needed.

```{r}
kable(scaling_cores, digits = 2)
```

For a given Stan model one should usually choose the number of chains and the number of threads per chain such that their product equals the number of (physical) cores one wishes to use. Only if different chains of the model have markedly different execution times (which they should not have, but which does occur sometimes in practice) may one consider the use of hyper-threading. Doing so will share the resources evenly across all chains, and whenever the fastest chain finishes, the freed resources can be given to the still running chains.

## Appendix

### Fake data simulation

```{r, eval=FALSE}
<>
```

### Poisson example model

```{r, eval=FALSE}
<>
```

### Threading benchmark function

```{r, eval=FALSE}
<>
```

### Munging of slowdown with chunking data

```{r, eval=FALSE}
<>
```

brms/inst/doc/brms_customfamilies.R0000644000175000017500000000665214146735325017306 0ustar nileshnilesh
params <- list(EVAL = TRUE)

## ---- SETTINGS-knitr, include=FALSE-----------------------------------------------------
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())

## ----cbpp-------------------------------------------------------------------------------
data("cbpp", package = "lme4")
head(cbpp)

## ----fit1, results='hide'---------------------------------------------------------------
fit1 <- brm(incidence | trials(size) ~ period + (1|herd),
            data = cbpp, family = binomial())

## ----fit1_summary-----------------------------------------------------------------------
summary(fit1)

## ----beta_binomial2---------------------------------------------------------------------
beta_binomial2 <- custom_family(
  "beta_binomial2", dpars = c("mu", "phi"),
  links = c("logit", "log"), lb = c(NA, 0),
  type = "int", vars = "vint1[n]"
)

## ----stan_funs--------------------------------------------------------------------------
stan_funs <- "
  real beta_binomial2_lpmf(int y, real mu, real phi, int T) {
    return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi);
  }
  int beta_binomial2_rng(real mu, real phi, int T) {
    return beta_binomial_rng(T, mu * phi, (1 - mu) * phi);
  }
"

## ----stanvars---------------------------------------------------------------------------
stanvars <- stanvar(scode = stan_funs, block = "functions")

## ----fit2, results='hide'---------------------------------------------------------------
fit2 <- brm(
  incidence | vint(size) ~ period + (1|herd), data = cbpp,
  family = beta_binomial2, stanvars = stanvars
)

## ----summary_fit2-----------------------------------------------------------------------
summary(fit2)

## ---------------------------------------------------------------------------------------
expose_functions(fit2, vectorize = TRUE)

## ----log_lik----------------------------------------------------------------------------
log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  beta_binomial2_lpmf(y, mu, phi, trials)
}

## ----loo--------------------------------------------------------------------------------
loo(fit1, fit2)

## ----posterior_predict------------------------------------------------------------------
posterior_predict_beta_binomial2 <- function(i, prep, ...) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  beta_binomial2_rng(mu, phi, trials)
}

## ----pp_check---------------------------------------------------------------------------
pp_check(fit2)

## ----posterior_epred--------------------------------------------------------------------
posterior_epred_beta_binomial2 <- function(prep) {
  mu <- brms::get_dpar(prep, "mu")
  trials <- prep$data$vint1
  trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE)
  mu * trials
}

## ----conditional_effects----------------------------------------------------------------
conditional_effects(fit2, conditions = data.frame(size = 1))

brms/inst/doc/brms_multivariate.html0000644000175000017500000022231014146741515017520 0ustar nileshnilesh

Estimating Multivariate Models with brms

Paul Bürkner

2021-11-22

Introduction

In the present vignette, we want to discuss how to specify multivariate multilevel models using brms. We call a model multivariate if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the tarsus length as well as the back color of chicks. Half of the brood was put into another fosternest, while the other half stayed in the fosternest of their own dam. This allows us to separate genetic from environmental factors. Additionally, we have information about the hatchdate and sex of the chicks (the latter being known for 94% of the animals).

data("BTdata", package = "MCMCglmm")
head(BTdata)
       tarsus       back  animal     dam fosternest  hatchdate  sex
1 -1.89229718  1.1464212 R187142 R187557      F2102 -0.6874021  Fem
2  1.13610981 -0.7596521 R187154 R187559      F1902 -0.6874021 Male
3  0.98468946  0.1449373 R187341 R187568       A602 -0.4279814 Male
4  0.37900806  0.2555847 R046169 R187518      A1302 -1.4656641 Male
5 -0.07525299 -0.3006992 R046161 R187528      A2602 -1.4656641  Fem
6 -1.13519543  1.5577219 R187409 R187945      C2302  0.3502805  Fem

Basic Multivariate Models

We begin with a relatively simple multivariate normal model.

fit1 <- brm(
  mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam),
  data = BTdata, chains = 2, cores = 2
)

As can be seen in the model code, we have used mvbind notation to tell brms that both tarsus and back are separate response variables. The term (1|p|fosternest) indicates a varying intercept over fosternest. By writing |p| in between we indicate that all varying effects of fosternest should be modeled as correlated. This makes sense since we actually have two model parts, one for tarsus and one for back. The indicator p is arbitrary and can be replaced by any other symbol that comes to mind (for details about the multilevel syntax of brms, see help("brmsformula") and vignette("brms_multilevel")). Similarly, the term (1|q|dam) indicates correlated varying effects of the genetic mother of the chicks. Alternatively, we could have also modeled the genetic similarities through pedigrees and corresponding relatedness matrices, but this is not the focus of this vignette (please see vignette("brms_phylogenetics")). The model results are readily summarized via

fit1 <- add_criterion(fit1, "loo")
summary(fit1)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + hatchdate + (1 | p | fosternest) + (1 | q | dam) 
         back ~ sex + hatchdate + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.48      0.05     0.39     0.58 1.00      789
sd(back_Intercept)                       0.24      0.08     0.09     0.39 1.01      286
cor(tarsus_Intercept,back_Intercept)    -0.52      0.23    -0.95    -0.08 1.01      377
                                     Tail_ESS
sd(tarsus_Intercept)                     1353
sd(back_Intercept)                        606
cor(tarsus_Intercept,back_Intercept)      695

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.27      0.05     0.16     0.38 1.00      664
sd(back_Intercept)                       0.35      0.06     0.23     0.47 1.00      453
cor(tarsus_Intercept,back_Intercept)     0.68      0.21     0.20     0.98 1.00      316
                                     Tail_ESS
sd(tarsus_Intercept)                     1163
sd(back_Intercept)                        952
cor(tarsus_Intercept,back_Intercept)      557

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept    -0.41      0.07    -0.55    -0.27 1.00     1356     1387
back_Intercept      -0.01      0.06    -0.14     0.11 1.00     2247     1577
tarsus_sexMale       0.77      0.06     0.66     0.87 1.00     3631     1251
tarsus_sexUNK        0.23      0.13    -0.03     0.48 1.00     3904     1252
tarsus_hatchdate    -0.04      0.06    -0.16     0.07 1.00     1221     1286
back_sexMale         0.01      0.07    -0.12     0.13 1.00     3669     1885
back_sexUNK          0.15      0.15    -0.13     0.44 1.00     3706     1602
back_hatchdate      -0.09      0.05    -0.19     0.02 1.00     2152     1551

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_tarsus     0.76      0.02     0.72     0.80 1.00     1928     1652
sigma_back       0.90      0.02     0.85     0.95 1.00     2607     1610

Residual Correlations: 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
rescor(tarsus,back)    -0.05      0.04    -0.13     0.02 1.00     2597     1370

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

The summary output of multivariate models closely resembles those of univariate models, except that the parameters now have the corresponding response variable as prefix. Within dams, tarsus length and back color seem to be negatively correlated, while within fosternests the opposite is true. This indicates differential effects of genetic and environmental factors on these two characteristics. Further, the small residual correlation rescor(tarsus, back) on the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of fit1, which we will use for model comparisons. Next, let’s take a look at some posterior-predictive checks, which give us a first impression of the model fit.

pp_check(fit1, resp = "tarsus")

pp_check(fit1, resp = "back")

This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of tarsus. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model and we use a Bayesian generalization of the \(R^2\) coefficient.

bayes_R2(fit1)
          Estimate  Est.Error      Q2.5     Q97.5
R2tarsus 0.4341246 0.02329163 0.3857624 0.4776837
R2back   0.1977502 0.02836021 0.1408088 0.2523545

Clearly, there is much variation in both animal characteristics that we cannot explain, but apparently we can explain more of the variation in tarsus length than in back color.
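
If one is mainly interested in the group-level standard deviations and correlations discussed above, they can also be extracted directly instead of reading them off the full summary. Below is a minimal sketch using the VarCorr method (assuming fit1 from above; output not shown):

# extract group-level (co)variance estimates of fit1
vc <- VarCorr(fit1)
vc  # prints sd and correlation estimates for 'dam' and 'fosternest'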

More Complex Multivariate Models

Now, suppose we only want to control for sex in tarsus but not in back and vice versa for hatchdate. Not that this is particularly reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables. We can no longer use mvbind syntax and so we have to use a more verbose approach:

bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam))
bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam))
fit2 <- brm(bf_tarsus + bf_back, data = BTdata, chains = 2, cores = 2)

Note that we have literally added the two model parts via the + operator, which is in this case equivalent to writing mvbf(bf_tarsus, bf_back). See help("brmsformula") and help("mvbrmsformula") for more details about this syntax. Again, we summarize the model first.

fit2 <- add_criterion(fit2, "loo")
summary(fit2)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + (1 | p | fosternest) + (1 | q | dam) 
         back ~ hatchdate + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.48      0.05     0.39     0.59 1.00      800
sd(back_Intercept)                       0.25      0.07     0.11     0.39 1.00      392
cor(tarsus_Intercept,back_Intercept)    -0.49      0.22    -0.91    -0.06 1.00      576
                                     Tail_ESS
sd(tarsus_Intercept)                     1270
sd(back_Intercept)                        589
cor(tarsus_Intercept,back_Intercept)      649

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.27      0.05     0.17     0.37 1.00      642
sd(back_Intercept)                       0.35      0.06     0.23     0.45 1.00      511
cor(tarsus_Intercept,back_Intercept)     0.70      0.20     0.24     0.98 1.00      229
                                     Tail_ESS
sd(tarsus_Intercept)                      937
sd(back_Intercept)                       1098
cor(tarsus_Intercept,back_Intercept)      311

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept    -0.42      0.07    -0.55    -0.28 1.00     1248     1372
back_Intercept       0.00      0.05    -0.10     0.10 1.00     1552     1414
tarsus_sexMale       0.77      0.06     0.66     0.88 1.00     3231     1537
tarsus_sexUNK        0.23      0.13    -0.03     0.49 1.00     2914     1644
back_hatchdate      -0.08      0.05    -0.19     0.02 1.00     2024     1543

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_tarsus     0.76      0.02     0.72     0.80 1.00     2089     1584
sigma_back       0.90      0.02     0.85     0.95 1.00     2066     1532

Residual Correlations: 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
rescor(tarsus,back)    -0.05      0.04    -0.13     0.02 1.00     2480     1767

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Let’s find out, how model fit changed due to excluding certain effects from the initial model:

loo(fit1, fit2)
Output of model 'fit1':

Computed from 2000 by 828 log-likelihood matrix

         Estimate   SE
elpd_loo  -2127.2 33.5
p_loo       176.2  7.4
looic      4254.3 67.1
------
Monte Carlo SE of elpd_loo is 0.4.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     814   98.3%   196       
 (0.5, 0.7]   (ok)        14    1.7%   76        
   (0.7, 1]   (bad)        0    0.0%   <NA>      
   (1, Inf)   (very bad)   0    0.0%   <NA>      

All Pareto k estimates are ok (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 2000 by 828 log-likelihood matrix

         Estimate   SE
elpd_loo  -2124.6 33.6
p_loo       173.5  7.3
looic      4249.2 67.3
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     813   98.2%   208       
 (0.5, 0.7]   (ok)        14    1.7%   107       
   (0.7, 1]   (bad)        1    0.1%   32        
   (1, Inf)   (very bad)   0    0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit2  0.0       0.0   
fit1 -2.5       1.4   

Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model sex and hatchdate for both response variables, but there is also no harm in including them (so I would probably just include them).
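
If a single-number summary of such a comparison is desired, model weights can be computed as well. Below is a minimal sketch using LOO-based stacking weights (assuming fit1 and fit2 from above; output not shown):

# LOO-based model weights of fit1 and fit2 (sketch; results will vary)
loo_model_weights(fit1, fit2)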

To give you a glimpse of the capabilities of brms' multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of tarsus, which we will now model by using the skew_normal family instead of the gaussian family. Since we no longer have a multivariate normal (or student-t) model, estimating residual correlations is no longer possible. We make this explicit using the set_rescor function. Further, we investigate whether the relationship of back and hatchdate is really linear, as previously assumed, by fitting a non-linear spline of hatchdate. On top of that, we model separate residual variances of tarsus for male and female chicks.

bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) +
  lf(sigma ~ 0 + sex) + skew_normal()
bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) +
  gaussian()

fit3 <- brm(
  bf_tarsus + bf_back + set_rescor(FALSE), 
  data = BTdata, chains = 2, cores = 2,
  control = list(adapt_delta = 0.95)
)

Again, we summarize the model and look at some posterior-predictive checks.

fit3 <- add_criterion(fit3, "loo")
summary(fit3)
 Family: MV(skew_normal, gaussian) 
  Links: mu = identity; sigma = log; alpha = identity
         mu = identity; sigma = identity 
Formula: tarsus ~ sex + (1 | p | fosternest) + (1 | q | dam) 
         sigma ~ 0 + sex
         back ~ s(hatchdate) + (1 | p | fosternest) + (1 | q | dam) 
   Data: BTdata (Number of observations: 828) 
  Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 2000

Smooth Terms: 
                       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sds(back_shatchdate_1)     2.10      1.11     0.34     4.66 1.00      563      515

Group-Level Effects: 
~dam (Number of levels: 106) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.47      0.05     0.38     0.58 1.00      476
sd(back_Intercept)                       0.23      0.07     0.08     0.36 1.00      249
cor(tarsus_Intercept,back_Intercept)    -0.54      0.24    -0.95    -0.05 1.00      458
                                     Tail_ESS
sd(tarsus_Intercept)                     1100
sd(back_Intercept)                        302
cor(tarsus_Intercept,back_Intercept)      706

~fosternest (Number of levels: 104) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(tarsus_Intercept)                     0.26      0.06     0.15     0.37 1.01      362
sd(back_Intercept)                       0.32      0.06     0.21     0.43 1.00      478
cor(tarsus_Intercept,back_Intercept)     0.66      0.23     0.16     0.99 1.01      148
                                     Tail_ESS
sd(tarsus_Intercept)                      518
sd(back_Intercept)                        696
cor(tarsus_Intercept,back_Intercept)      306

Population-Level Effects: 
                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
tarsus_Intercept        -0.41      0.07    -0.54    -0.27 1.00      797     1161
back_Intercept           0.00      0.05    -0.10     0.11 1.00     1219     1284
tarsus_sexMale           0.77      0.06     0.66     0.88 1.00     2604     1442
tarsus_sexUNK            0.21      0.12    -0.02     0.44 1.00     2548     1583
sigma_tarsus_sexFem     -0.30      0.04    -0.39    -0.21 1.00     2220     1266
sigma_tarsus_sexMale    -0.24      0.04    -0.32    -0.16 1.00     1723     1329
sigma_tarsus_sexUNK     -0.39      0.13    -0.63    -0.14 1.00     1636     1461
back_shatchdate_1       -0.08      3.24    -5.80     7.09 1.00      880     1003

Family Specific Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_back       0.90      0.02     0.86     0.95 1.00     2079     1550
alpha_tarsus    -1.22      0.43    -1.85     0.10 1.01      922      445

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We see that the (log) residual standard deviation of tarsus is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. Further, we see from the negative alpha (skewness) parameter of tarsus that the residuals are indeed slightly left-skewed. Lastly, running

conditional_effects(fit3, "hatchdate", resp = "back")

reveals a non-linear effect of hatchdate on the back color, which seems to change in waves over the course of the hatch dates.

There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see help("brmsformula") or vignette("brms_multilevel")). In fact, nearly all the flexibility of univariate models is retained in multivariate models.
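
As a small illustration of one such option, the following sketch (not fitted in this vignette) replaces the spline of hatchdate by a Gaussian process term, leaving everything else as in fit3:

# sketch only: a Gaussian process over hatchdate instead of s(hatchdate)
bf_back_gp <- bf(back ~ gp(hatchdate) + (1|p|fosternest) + (1|q|dam)) + gaussian()
fit_gp <- brm(
  bf_tarsus + bf_back_gp + set_rescor(FALSE),
  data = BTdata, chains = 2, cores = 2
)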

References

Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. Journal of Evolutionary Biology, 20(2), 549-557.

brms/inst/doc/brms_missings.html0000644000175000017500000031141614146736273016651 0ustar nileshnilesh

Handle Missing Values with brms

Paul Bürkner

2021-11-22

Introduction

Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using brms: (1) Impute missing values before the model fitting with multiple imputation, and (2) impute missing values on the fly during model fitting1. As a simple example, we will use the nhanes data set, which contains information on participants’ age, bmi (body mass index), hyp (hypertensive), and chl (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting bmi by age and chl.

data("nhanes", package = "mice")
head(nhanes)
  age  bmi hyp chl
1   1   NA  NA  NA
2   2 22.7   1 187
3   1   NA   1 187
4   3   NA  NA  NA
5   1 20.4   1 113
6   3   NA  NA 184

Imputation before model fitting

There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but m times leading to a total of m fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is mice (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with brms. Here, we apply the default settings of mice, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables’ characteristics.

library(mice)
imp <- mice(nhanes, m = 5, print = FALSE)

Now, we have m = 5 imputed data sets stored within the imp object. In practice, we will likely need more than 5 of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of 100 imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to m = 5 for the purpose of this vignette. Regardless of the value of m, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass imp directly. The latter works because brms offers special support for data imputed by mice. We will go with the latter approach, since it is less typing. Fitting our model of interest with brms to the multiple imputed data sets is straightforward.

fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2)

The returned fitted model is an ordinary brmsfit object containing the posterior draws of all m submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all.

summary(fit_imp1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: bmi ~ age * chl 
   Data: imp (Number of observations: 25) 
  Draws: 10 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 10000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    15.80      7.81    -0.17    31.02 1.12       56      237
age           0.80      5.06    -8.58    11.46 1.16       42      254
chl           0.08      0.04     0.00     0.16 1.11       58      187
age:chl      -0.02      0.02    -0.07     0.03 1.13       50      356

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.24      0.62     2.27     4.65 1.17       39      279

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

In the summary output, we notice that some Rhat values are higher than \(1.1\), indicating possible convergence problems. For models based on multiple imputed data sets, this is often a false positive: Chains of different submodels may not overlay each other exactly, since they were fitted to different data. We can see the chains on the right-hand side of

plot(fit_imp1, variable = "^b", regex = TRUE)

Such non-overlaying chains imply high Rhat values without there actually being any convergence issue. Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at

round(fit_imp1$rhats, 2)
  b_Intercept b_age b_chl b_age.chl sigma lp__
1           1     1     1         1     1    1
2           1     1     1         1     1    1
3           1     1     1         1     1    1
4           1     1     1         1     1    1
5           1     1     1         1     1    1

The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of age and chl.

conditional_effects(fit_imp1, "age:chl")

To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed beforehand. Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation.

Compatibility with other multiple imputation packages

brms offers built-in support for mice mainly because I use the latter in some of my own research projects. Nevertheless, brm_multiple supports all kinds of multiple imputation packages as it also accepts a list of data frames as input for its data argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to brm_multiple. Most multiple imputation packages have some built-in functionality for this task. When using the mi package, for instance, you simply need to call the mi::complete function to get the desired output.
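
For example, with mice itself the completed data sets can be extracted as a list and passed on as follows; this sketch is equivalent to passing imp directly (output not shown):

# extract all m completed data sets as a list of data frames
imp_list <- mice::complete(imp, action = "all")
# pass the list to brm_multiple just like the mids object before
fit_imp1b <- brm_multiple(bmi ~ age * chl, data = imp_list, chains = 2)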

Imputation during model fitting

Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. This remains true when imputing missing values with brms, but possibly to a somewhat smaller degree. Consider again the nhanes data with the goal to predict bmi by age, and chl. Since age contains no missing values, we only have to take special care of bmi and chl. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors. In brms we can do this as follows:

bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi() ~ age) + set_rescor(FALSE)
fit_imp2 <- brm(bform, data = nhanes)

The model has become multivariate, as we no longer only predict bmi but also chl (see vignette("brms_multivariate") for details about the multivariate syntax of brms). We ensure that missings in both variables will be modeled rather than excluded by adding | mi() on the left-hand side of the formulas2. We write mi(chl) on the right-hand side of the formula for bmi to ensure that the estimated missing values of chl will be used in the prediction of bmi. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way.

summary(fit_imp2)
 Family: MV(gaussian, gaussian) 
  Links: mu = identity; sigma = identity
         mu = identity; sigma = identity 
Formula: bmi | mi() ~ age * mi(chl) 
         chl | mi() ~ age 
   Data: nhanes (Number of observations: 25) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
bmi_Intercept    13.85      9.00    -4.14    31.86 1.00     1722     2201
chl_Intercept   142.27     24.27    94.38   192.26 1.00     2761     2961
bmi_age           2.68      5.59    -8.33    14.00 1.00     1481     1843
chl_age          28.31     12.95     2.71    54.59 1.00     2713     3062
bmi_michl         0.10      0.05     0.00     0.19 1.00     1823     2154
bmi_michl:age    -0.03      0.03    -0.08     0.02 1.00     1521     1933

Family Specific Parameters: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma_bmi     3.39      0.78     2.20     5.28 1.00     1533     1970
sigma_chl    39.87      7.34    28.46    56.71 1.00     2051     2525

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
conditional_effects(fit_imp2, "age:chl", resp = "bmi")

The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the ‘one-step’ approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the ‘one-step’ approach is that the model needs to be fitted only once instead of m times. Also, within the brms framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. On the downside, it is currently not possible to impute discrete variables, because Stan (the engine behind brms) does not allow estimating discrete parameters.
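
To illustrate the point about more flexible imputation models, consider the following purely hypothetical sketch: if the data contained an additional grouping variable (called clinic below, which does not exist in nhanes), both the outcome model and the imputation model of chl could contain varying intercepts:

# hypothetical sketch: 'clinic' is an assumed grouping variable, not part of nhanes
bform_ml <- bf(bmi | mi() ~ age * mi(chl) + (1 | clinic)) +
  bf(chl | mi() ~ age + (1 | clinic)) + set_rescor(FALSE)
# fit_ml <- brm(bform_ml, data = data_with_clinic)  # not run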

Combining measurement error and missing values

Missing value terms in brms can not only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, mi terms are a natural (and somewhat more verbose) generalization of the now soft-deprecated me terms. Suppose we had measured the variable chl with some known error:

nhanes$se <- rexp(nrow(nhanes), 2)

Then we can go ahead and include this information in the model as follows:

bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi(se) ~ age) + set_rescor(FALSE)
fit_imp3 <- brm(bform, data = nhanes)

Summarizing and post-processing the model continues to work as usual.
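
For instance, the methods used for the previous models work here in the same way (a sketch; output not shown):

summary(fit_imp3)
conditional_effects(fit_imp3, "age:chl", resp = "bmi")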

References

Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. Journal of Statistical Software, 1-68. doi.org/10.18637/jss.v045.i03

Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation. The American Statistician, 64(2), 159-163. doi.org/10.1198/tast.2010.09109


  1. Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and then impute the missing values after fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the predict method.↩︎
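
A minimal sketch of this route in terms of the nhanes example (only rows with fully observed predictors can be predicted; not run in this vignette):

# fit on rows with observed bmi and chl, then predict bmi where it is missing
fit_obs <- brm(bmi ~ age * chl, data = subset(nhanes, !is.na(bmi) & !is.na(chl)))
predict(fit_obs, newdata = subset(nhanes, is.na(bmi) & !is.na(chl)))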

  2. We don’t really need this for bmi, since bmi is not used as a predictor for another variable. Accordingly, we could also – and equivalently – impute missing values of bmi after model fitting by means of posterior prediction.↩︎

brms/inst/doc/brms_monotonic.R0000644000175000017500000000634114146737241016261 0ustar nileshnilesh
params <- list(EVAL = TRUE)

## ---- SETTINGS-knitr, include=FALSE-----------------------------------------------------
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())

## ---------------------------------------------------------------------------------------
income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100")
income <- factor(sample(income_options, 100, TRUE),
                 levels = income_options, ordered = TRUE)
mean_ls <- c(30, 60, 70, 75)
ls <- mean_ls[income] + rnorm(100, sd = 7)
dat <- data.frame(income, ls)

## ---- results='hide'--------------------------------------------------------------------
fit1 <- brm(ls ~ mo(income), data = dat)

## ---------------------------------------------------------------------------------------
summary(fit1)
plot(fit1, variable = "simo", regex = TRUE)
plot(conditional_effects(fit1))

## ---- results='hide'--------------------------------------------------------------------
dat$income_num <- as.numeric(dat$income)
fit2 <- brm(ls ~ income_num, data = dat)

## ---------------------------------------------------------------------------------------
summary(fit2)

## ---- results='hide'--------------------------------------------------------------------
contrasts(dat$income) <- contr.treatment(4)
fit3 <- brm(ls ~ income, data = dat)

## ---------------------------------------------------------------------------------------
summary(fit3)

## ---------------------------------------------------------------------------------------
loo(fit1, fit2, fit3)

## ---- results='hide'--------------------------------------------------------------------
prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1")
fit4 <- brm(ls ~ mo(income), data = dat, prior = prior4, sample_prior = TRUE)

## ---------------------------------------------------------------------------------------
summary(fit4)

## ---------------------------------------------------------------------------------------
plot(fit4, variable = "prior_simo", regex = TRUE, N = 3)

## ---------------------------------------------------------------------------------------
dat$age <- rnorm(100, mean = 40, sd = 10)

## ---- results='hide'--------------------------------------------------------------------
fit5 <- brm(ls ~ mo(income)*age, data = dat)

## ---------------------------------------------------------------------------------------
summary(fit5)
conditional_effects(fit5, "income:age")

## ---------------------------------------------------------------------------------------
dat$city <- rep(1:10, each = 10)
var_city <- rnorm(10, sd = 10)
dat$ls <- dat$ls + var_city[dat$city]

## ---- results='hide'--------------------------------------------------------------------
fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat)

## ---------------------------------------------------------------------------------------
summary(fit6)

brms/inst/doc/brms_multivariate.R0000644000175000017500000000474014146741515016762 0ustar nileshnilesh
params <- list(EVAL = TRUE)

## ---- SETTINGS-knitr, include=FALSE-----------------------------------------------------
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())

## ----data-------------------------------------------------------------------------------
data("BTdata", package = "MCMCglmm")
head(BTdata)

## ----fit1, message=FALSE, warning=FALSE, results='hide'----------------------------------
fit1 <- brm(
  mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam),
  data = BTdata, chains = 2, cores = 2
)

## ----summary1, warning=FALSE-------------------------------------------------------------
fit1 <- add_criterion(fit1, "loo")
summary(fit1)

## ----pp_check1, message=FALSE------------------------------------------------------------
pp_check(fit1, resp = "tarsus")
pp_check(fit1, resp = "back")

## ----R2_1---------------------------------------------------------------------------------
bayes_R2(fit1)

## ----fit2, message=FALSE, warning=FALSE, results='hide'----------------------------------
bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam))
bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam))
fit2 <- brm(bf_tarsus + bf_back, data = BTdata, chains = 2, cores = 2)

## ----summary2, warning=FALSE-------------------------------------------------------------
fit2 <- add_criterion(fit2, "loo")
summary(fit2)

## ----loo12--------------------------------------------------------------------------------
loo(fit1, fit2)

## ----fit3, message=FALSE, warning=FALSE, results='hide'----------------------------------
bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) +
  lf(sigma ~ 0 + sex) + skew_normal()
bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) +
  gaussian()

fit3 <- brm(
  bf_tarsus + bf_back + set_rescor(FALSE),
  data = BTdata, chains = 2, cores = 2,
  control = list(adapt_delta = 0.95)
)

## ----summary3, warning=FALSE-------------------------------------------------------------
fit3 <- add_criterion(fit3, "loo")
summary(fit3)

## ----me3----------------------------------------------------------------------------------
conditional_effects(fit3, "hatchdate", resp = "back")

brms/inst/doc/brms_threading.html0000644000175000017500000042330014146747004016760 0ustar nileshnilesh

Running brms models with within-chain parallelization

Sebastian Weber & Paul Bürkner

2021-11-22

Introduction

Full Bayesian inference is a computationally demanding task and we often wish to run our models faster, in shorter walltime. Modern computers have multiple processors available on a given machine, such that running the inference in parallel will shorten the overall walltime. While between-chain parallelization is straightforward, as one merely launches multiple chains at the same time, within-chain parallelization is more complicated in various ways. This vignette aims to introduce the user to within-chain parallelization with brms, since its efficient use depends on various aspects specific to the user's model.

Quick summary

Assuming you have a brms model which you wish to evaluate faster by using more cores per chain, for example:

fit_serial <- brm(
  count ~ zAge + zBase * Trt + (1|patient),
  data = epilepsy, family = poisson(),
  chains = 4, cores = 4, backend = "cmdstanr"
)

Then running this model with threading requires cmdstanr as backend and you can simply add threading support to an existing model with the update mechanism as:

fit_parallel <- update(
  fit_serial, chains = 2, cores = 2,
  backend = "cmdstanr", threads = threading(2)
)

The example above assumes that 4 cores are available, which are best used without within-chain parallelization by running 4 chains in parallel. When using within-chain parallelization it is still advisable to use just as many threads in total as you have CPU cores. It's thus sensible in this case to reduce the number of chains running in parallel to just 2, but allow each chain to use 2 threads. Obviously this will reduce the number of post-warmup draws here, as we assumed a fixed budget of 4 cores.

  • Only apply within-chain parallelization to large problems that take at least a few minutes to calculate. The epilepsy example above is actually too small to gain in speed (just a few seconds per chain on this machine).
  • Within-chain parallelization is less efficient than between-chain parallelization. So only use within-chain parallelism if more CPUs can be used to run the entire analysis.
  • Due to details of the model and data-set, speedups with more cores can be very limited. Not every model is amenable to within-chain parallelization, and an empirical evaluation is in some cases advisable.
  • Enabling threading usually slows down any model to some extent and this slowdown must be offset by sufficient cores per chain in order to really gain in execution speed.
  • Doubling the execution speed with few cores is a lot easier than obtaining larger speedups with even more cores.
  • Models with computationally expensive likelihoods are easier to parallelize than less expensive likelihoods. For example, the Poisson distribution involves expensive \(\log\Gamma\) functions whereas the normal likelihood is very cheap to calculate in comparison.
  • Models with many parameters (e.g., multilevel models) carry a large overhead when running in parallel.
  • With a larger overhead of the model, the likelihood must be sufficiently expensive such that the relative computational cost of likelihood to parallelization overhead is favorable.
  • Avoid using hyper-threading, that is, only use as many threads as you have physical cores available.
  • Ensure that the data is randomly sorted such that consecutive subsets of the data are roughly of the same computational effort.

Within-chain parallelization

The within-chain parallelization implemented in brms is based on the reduce_sum facility in Stan. The basic principle that reduce_sum uses is to split a large summation into arbitrary smaller partial sums. Due to the commutativity and associativity of the sum operation these smaller partial sums can be evaluated in any order and in parallel from one another. brms leverages reduce_sum to evaluate the log-likelihood of the model in parallel as for example

\[ \begin{aligned} l(y|\theta) &= \sum_{i=1}^N l_i(y_i| \theta) \\ &= \sum_{i=1}^{S_1} l_i(y_i| \theta) + \sum_{i=S_1+1}^N l_i(y_i| \theta). \end{aligned} \]

As a consequence, the within-chain parallelization requires mutually independent log-likelihood terms which restricts its applicability to some degree.

Furthermore, the within-chain parallelization is only applicable to the evaluation of the data likelihood while all other parts of the model, for example priors, will remain running serially. Thus, only a fraction of the entire Stan model will run in parallel, which limits the potential speedup one may obtain. The theoretical speedup of a program that runs only partially in parallel is described by Amdahl's law. For example, with 90% of the computational load running in parallel, one can essentially double the execution speed with 2 cores, while 8 cores may speed up the program by at most about 5x. How large the computational cost of the log-likelihood is in relation to the entire model is very dependent on the model of the user.
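
As a quick illustration of Amdahl's law, the theoretical speedup for a fraction p of the work running in parallel on n cores can be computed as in the following sketch; the numbers reproduce the 90% example above:

# theoretical speedup by Amdahl's law: fraction p runs in parallel on n cores
amdahl_speedup <- function(p, n) 1 / ((1 - p) + p / n)
amdahl_speedup(p = 0.9, n = c(2, 4, 8))  # approximately 1.8, 3.1, 4.7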

In practice, the speedups are even smaller than the theoretical speedups. This is caused by the additional overhead implied by forming multiple smaller sums than just one large one. For example, for each partial sum formed the entire parameter vector \(\theta\) has to be copied in memory for Stan to be able to calculate the gradient of the log-likelihood. Hence, with more partial sums, more copying is necessary as opposed to evaluating just one large sum. Whether the additional copying is indeed relevant depends on the computational cost of the log-likelihood of each term and the number of parameters. For a model with a computationally cheap normal log-likelihood, this effect is more important than for a model with a Poisson log-likelihood, and for multilevel models with many parameters more copying is needed than for simpler regression models. It may therefore be necessary to form sufficiently large partial sums to warrant an efficient parallel execution. The size of the partial sums is referred to as the grainsize, which is set to a reasonable default value. However, for some models this tuning parameter requires some attention from the user for optimal performance.

Finally, it is important to note that by default the exact size and order of the partial sums is not stable as it is adjusted to the load of the system. As a result, exact numerical reproducibility is not guaranteed by default. In order to warrant the same size and order of the partial sums, the static option must be used and set to TRUE, which uses a deterministic scheduler for the parallel work.
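
Both the grainsize and the static option are set via the threading() argument. Below is a sketch based on the epilepsy example from above; the grainsize value is purely illustrative:

fit_static <- brm(
  count ~ zAge + zBase * Trt + (1|patient),
  data = epilepsy, family = poisson(),
  chains = 2, cores = 2, backend = "cmdstanr",
  # illustrative grainsize; static = TRUE enables the deterministic scheduler
  threads = threading(2, grainsize = 100, static = TRUE)
)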

Example model

As a toy demonstration, we use here a multilevel Poisson model. The model is a varying intercept model with \(10^{4}\) data observations which are grouped into \(1000\) groups. Each data item has \(3\) continuous covariates. The simulation code for the fake data can be found in the appendix; its first \(10\) rows are:

kable(head(fake, 10), digits = 3)
g x1 x2 x3 theta eta mu y
382 0.496 0.623 0.069 -0.262 0.510 0.248 0
578 -0.748 -0.300 -0.768 -0.903 -0.032 -0.934 0
772 -1.124 -0.161 -0.882 -1.047 -0.551 -1.598 1
774 0.992 -0.593 1.007 1.578 -0.045 1.533 2
729 0.641 -1.563 -0.491 -0.291 -1.460 -1.751 0
897 -0.085 -0.531 -0.978 -1.296 -0.929 -2.226 0
110 -0.772 1.364 -0.629 -1.351 0.124 -1.227 0
248 -1.441 0.699 1.284 2.072 -1.020 1.053 1
754 -1.320 0.837 -0.137 -0.237 1.452 1.215 3
682 -1.345 -2.673 -1.628 -1.146 -0.388 -1.534 0

The brms model fitting this data is:

model_poisson <- brm(
  y ~ 1 + x1 + x2 + (1 | g),
  data = fake,
  family = poisson(),
  iter = 500, # short sampling to speedup example
  chains = 2,
  prior = prior(normal(0,1), class = b) +
    prior(constant(1), class = sd, group = g),
  backend = "cmdstanr",
  threads = threading(4)
)

Here we have fixed the standard deviation of the between-group variation for the intercept to the true value of \(1\) as used in the simulation. This is to avoid unfavorable geometry of the problem allowing us to concentrate on computational aspects alone.

The Poisson likelihood is a relatively expensive likelihood due to the use of the \(\log\Gamma\) function, as opposed to, for example, a normal likelihood, which involves far less expensive operations. Moreover, this example is chosen in order to demonstrate the parallelization overhead implied by a large number of parameters.

Managing parallelization overhead

As discussed above, the key mechanism to run Stan programs with parallelization is to split the large sum over independent log likelihood terms into arbitrary smaller partial sums. Creating more partial sums allows one to increase the number of simultaneous parallel computations in a granular way, but at the same time additional overhead is introduced through the requirement to copy the entire parameter vector for each partial sum formed, along with further overhead due to splitting up a single large task into multiple smaller ones.

By default, brms will choose a sensible grainsize which defines roughly how large a given partial sum will be. The actual chunk size is automatically tuned whenever the default non-static scheduler is used, which is the recommended choice to start with. As noted before, only the static scheduler gives fully deterministic results, since the chunk size and order of the partial sums then stay the same during sampling.

While we expect that the default grainsize in brms is reasonably good for many models, it can improve performance if one tunes the grainsize specifically to a given model and data set. We suggest successively increasing the number of chunks a given data set is split into with the static scheduler and running this on a single core. This way one can control the number of partial sums accurately and monitor the execution time as it increases. These experiments are run with only a single chain and very short iteration numbers, as we are not interested in the statistical results but rather aim to explore the tuning parameter space of the chunk size as quickly as possible. The number of iterations needed to get reliable runtime estimates for a given chunk size will depend on many details, and the easiest way to determine this is to run the benchmark with multiple numbers of iterations. Whenever their results match approximately, the iteration numbers are sufficient. In order to decrease the variation between runs, we also fix the random seed, initial value and the tuning parameters of the sampler (step size and mass matrix).

Below is an example R code demonstrating such a benchmark. The utility function benchmark_threading is shown and explained in the appendix.

chunking_bench <- transform(
    data.frame(chunks = 4^(0:3)),
    grainsize = ceiling(N / chunks)
)

iter_test <- c(10, 20, 40)  # very short test runs
scaling_chunking <- benchmark_threading(
  model_poisson,
  cores = 1,                         
  grainsize = chunking_bench$grainsize,  # test various grainsizes
  iter = iter_test,  
  static = TRUE  # with static partitioner
)

# run as reference the model *without* reduce_sum
ref <- benchmark_reference(model_poisson, iter_test)

# for additional data munging please refer to the appendix

Graphically summarizing the results shows that with more than 8 chunks the overhead is about 10% and increases further with more chunks. For models without many parameters, no such overhead should be observed. Furthermore, one can see that the larger iteration numbers give similar results, implying that they suffice for stable runtime estimates for these (and the following) benchmarks. The overhead of up to 20% in this example with 16 chunks may seem large due to the scaling of the plot. One must not forget that once we start to use more CPU cores, the overhead is easily offset, but it does limit the maximal speedup we can get. For example, 2 units of computation become 2.4 units due to the overhead, such that on 2 cores we don't quite double the execution speed, but rather get a 1.6x increase in speed instead of a 2x speedup.

Considering in addition the time per leapfrog step of the NUTS sampler shows on an absolute scale similar information as before. The upside of this representation is that we can visualize the slowdown in relation to the program without reduce_sum. As we can see, the additional overhead due to merely enabling reduce_sum is substantial in this example. This is attributed in the specific example to the large number of random effects.

ggplot(scaling_chunking) +
    aes(chunks, slowdown, colour = iter, shape = iter) +
    geom_line() + geom_point() +
    scale_x_log10(breaks = scaling_chunking$chunks) +
    scale_y_log10(breaks=seq(0.8, 2.5, by=0.1)) +
    ggtitle("Slowdown with increasing number of chunks")

ggplot(scaling_chunking) +
    aes(chunks, 1E3 * runtime/num_leapfrog, colour = iter, shape=iter) +
    geom_line() + geom_point() +
    scale_x_log10(breaks = scaling_chunking$chunks) +
    scale_y_log10(breaks=seq(0.1, 2.0, by=0.1)) +
    geom_hline(data=ref, aes(yintercept=1E3 * runtime/num_leapfrog, colour=iter), linetype=I(2)) +
    ggtitle("Time per leapfrog step vs number of chunks",
            "Dashed line is reference model without reduce_sum") +
    ylab("Time per leapfrog step [ms]")

Parallelization speedup

In practice, we are often interested in so-called “hard-scaling” properties of the parallelization system. That is, for a fixed problem size we would like to know how much faster we can execute the Stan program with increasing number of threads. As nowadays CPUs usually run with so-called hyper-threading, it is also of interest if this technique is beneficial for Stan programs as well (spoiler alert: it’s not useful). As we have seen before, the grainsize can have an impact on the performance and is as such a tuning parameter. Below we demonstrate some exemplary R code which runs a benchmark with varying number of CPU cores and varying number of grainsizes.

num_cpu <- parallel::detectCores(logical = FALSE)
num_cpu_logical <- parallel::detectCores(logical = TRUE)
grainsize_default <- ceiling(N / (2 * num_cpu))
cores <- c(2^seq(0, floor(log2(num_cpu_logical))), num_cpu, num_cpu_logical)
cores <- sort(unique(cores))
grainsize <- c(grainsize_default, grainsize_default/2, grainsize_default/4)
grainsize <- round(grainsize)

iter_scaling <- 20
scaling_cores <- benchmark_threading(
  model_poisson,
  cores = cores,
  grainsize = grainsize,
  iter = iter_scaling,
  static = FALSE
)

single_core  <- transform(
    subset(scaling_cores, cores == 1),
    runtime_single = runtime,
    num_leapfrog=NULL, runtime=NULL, cores = NULL
)

scaling_cores <- transform(
  merge(scaling_cores, single_core),
  speedup = runtime_single/runtime,
  grainsize = factor(grainsize)
)

It is important to consider the absolute runtime and the relative speedup vs. running on a single core. The relative speedup can be misleading if the single core runtime is very slow in which case speed gains on more CPUs may look overly good. Considering instead the absolute runtime avoids this problem. After all, we are interested in the shortest walltime we can get rather than any relative speedups.

ggplot(scaling_cores) +
    aes(cores, runtime, shape = grainsize, color = grainsize) +
    geom_vline(xintercept = num_cpu, linetype = 3) +
    geom_line() + geom_point() + 
    scale_x_log10(breaks = scaling_cores$cores) +
    scale_y_log10(breaks=seq(0.1, 1.4, by=0.1)) +
    theme(legend.position = c(0.85, 0.8)) +
    geom_hline(data=subset(ref, iter==iter_scaling), aes(yintercept=runtime), linetype=I(2)) +
    ggtitle("Runtime with varying number of cores",
            "Dashed line is reference model without reduce_sum")

ggplot(scaling_cores) +
  aes(cores, speedup, shape = grainsize, color = grainsize) +
  geom_abline(slope = 1, intercept = 0, linetype = 2) +
  geom_vline(xintercept = num_cpu, linetype = 3) +
  geom_line() + geom_point() +
  scale_x_log10(breaks=scaling_cores$cores) +
  scale_y_log10(breaks=scaling_cores$cores) +
  theme(aspect.ratio = 1) +
  coord_fixed(xlim = c(1, num_cpu_logical), ylim = c(1, num_cpu_logical)) +
  ggtitle("Relative speedup vs 1 core")

The vertical dotted line marks the number of physical CPU cores of the machine on which this was run. The horizontal dashed line in the plot with absolute runtime marks the respective runtime of the model without reduce_sum, and the dashed unity line in the plot with the relative speedup marks the theoretical maximal speedup. We can see that there is no further reduction in execution time when increasing the thread count to be greater than the number of physical CPUs. Hence, the use of hyper-threading is not helpful when aiming to maximize the speed of a Stan program. Moreover, the use of threading outperforms the single core runtime only when using more than 4 cores in this example.

For this example, the shown grainsizes matter on some machines but not on others, so your results may look quite different from what is shown here. The overall speedups may not seem impressive here, which is attributed to the large number of parameters relative to the number of observations. However, we can still outperform the single core runtime when using many cores. The most important advantage of threading, though, is that with an increasing data set size, the user has the option to use a brute-force approach to balance the increase in walltime needed.

kable(scaling_cores, digits = 2)
grainsize iter cores num_leapfrog runtime runtime_single speedup
1250 20 1 300 0.42 0.42 1.00
1250 20 2 300 0.30 0.42 1.43
1250 20 4 300 0.24 0.42 1.76
1250 20 8 300 0.27 0.42 1.59
312 20 1 300 0.58 0.58 1.00
312 20 2 300 0.33 0.58 1.79
312 20 4 300 0.26 0.58 2.27
312 20 8 300 0.27 0.58 2.19
625 20 1 300 0.47 0.47 1.00
625 20 2 300 0.30 0.47 1.59
625 20 4 300 0.26 0.47 1.84
625 20 8 300 0.24 0.47 1.92

For a given Stan model one should usually choose the number of chains and the number of threads per chain such that their product equals the number of (physical) cores one wishes to use. Only if different chains of the model have markedly different execution times (which they should not have, but which does occur sometimes in practice) may one consider the use of hyper-threading. Doing so will share the resources evenly across all chains, and whenever the fastest chain finishes, the freed resources can be given to the still running chains.
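
In code, this recommendation amounts to something like the following sketch (assuming that the number of physical cores divides evenly by the number of chains):

# use all physical cores: chains x threads per chain = number of physical cores
n_physical <- parallel::detectCores(logical = FALSE)
fit <- update(
  model_poisson, chains = 2, cores = 2,
  threads = threading(n_physical / 2), backend = "cmdstanr"
)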

Appendix

Fake data simulation

set.seed(54647)
# number of observations
N <- 1E4
# number of group levels
G <- round(N / 10)
# number of predictors
P <- 3
# regression coefficients
beta <- rnorm(P)

# sampled covariates, group means and fake data
fake <- matrix(rnorm(N * P), ncol = P)
dimnames(fake) <- list(NULL, paste0("x", 1:P))

# fixed effect part and sampled group membership
fake <- transform(
  as.data.frame(fake),
  theta = fake %*% beta,
  g = sample.int(G, N, replace=TRUE)
)

# add random intercept by group
fake  <- merge(fake, data.frame(g = 1:G, eta = rnorm(G)), by = "g")

# linear predictor
fake  <- transform(fake, mu = theta + eta)

# sample Poisson data
fake  <- transform(fake, y = rpois(N, exp(mu)))

# shuffle order of data rows to ensure even distribution of computational effort
fake <- fake[sample.int(N, N),]

# drop not needed row names
rownames(fake) <- NULL

Poisson example model

model_poisson <- brm(
  y ~ 1 + x1 + x2 + (1 | g),
  data = fake,
  family = poisson(),
  iter = 500, # short sampling to speedup example
  chains = 2,
  prior = prior(normal(0,1), class = b) +
    prior(constant(1), class = sd, group = g),
  backend = "cmdstanr",
  threads = threading(4)
)

Threading benchmark function

# Benchmarks given model with cross-product of tuning parameters CPU
# cores, grainsize and iterations. Models are run with either static
# or non-static scheduler and inits is set by default to 0 on the
# unconstrained scale. Function returns a data-frame with the
# cross-product of the tuning parameters and as result column the
# respective runtime.
benchmark_threading <- function(model, cores = 1, grainsize = 1, iter = 100, 
                                static = FALSE) {

    winfo <- extract_warmup_info(model)
    sims  <- rstan::extract(model$fit)
    init <- list(extract_draw(sims, 1))

    scaling_model <- update(
        model, refresh = 0, 
        threads = threading(1, grainsize = grainsize[1], static = static), 
        chains = 1, iter = 2, backend = "cmdstanr"
    )

    run_benchmark <- function(cores, size, iter) {
        bench_fit <- update(
            scaling_model, warmup=0, iter = iter,
            chains = 1, seed = 1234, inits = init, refresh = 0, save_warmup=TRUE,
            threads = threading(cores, grainsize = size, static = static),
            inv_metric=winfo$inv_metric[[1]],
            step_size=winfo$step_size[[1]],
            adapt_engaged=FALSE
        )
        lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value)
        elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit)))

        c(num_leapfrog=lf, runtime=elapsed)
    }

    cases <- expand.grid(cores = cores, grainsize = grainsize, iter = iter)
    res <- with(cases, mapply(run_benchmark, cores, grainsize, iter))
    cbind(cases, as.data.frame(t(res)))
}
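
As a usage illustration (not the exact code used to create the scaling_cores table shown earlier), benchmark_threading could be called over a grid of thread counts and grainsizes, with the relative speedup derived from the single-thread runs. The grid values below are illustrative only.

# sketch: benchmark the example model over a grid of thread counts and
# grainsizes, then derive the relative speedup against the 1-thread run
scaling_cores <- benchmark_threading(
    model_poisson,
    cores = c(1, 2, 4, 8),
    grainsize = c(312, 625, 1250),
    iter = 20
)
single_core <- with(
    subset(scaling_cores, cores == 1),
    data.frame(grainsize = grainsize, iter = iter, runtime_single = runtime)
)
scaling_cores <- transform(
    merge(scaling_cores, single_core),
    speedup = runtime_single / runtime
)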

# Benchmarks the reference runtime of the model without threading
# (threads = NULL), using the same draws-based initial values, step size,
# and inverse metric as benchmark_threading.
benchmark_reference <- function(model, iter = 100, inits = 0) {
    winfo <- extract_warmup_info(model)
    sims  <- rstan::extract(model$fit)
    init <- list(extract_draw(sims, 1))

    ref_model <- update(
        model, refresh = 0, 
        threads = NULL,
        chains = 1, iter = 2, backend = "cmdstanr"
    )

    run_benchmark_ref <- function(iter_bench) {
        bench_fit <- update(
            ref_model, warmup=0, iter = iter_bench,
            chains = 1, seed = 1234, inits = init, refresh = 0,
            inv_metric=winfo$inv_metric[[1]],
            step_size=winfo$step_size[[1]],
            adapt_engaged=FALSE
        )

        lf <- sum(subset(nuts_params(bench_fit, inc_warmup=TRUE), Parameter=="n_leapfrog__")$Value)
        elapsed <- sum(colSums(rstan::get_elapsed_time(bench_fit$fit)))

        c(num_leapfrog=lf, runtime=elapsed)
    }
    
    ref <- sapply(iter, run_benchmark_ref)
    ref <- cbind(as.data.frame(t(ref)), iter=iter)
    ref
}

# extract step size and inverse metric from the adaptation info of a fitted
# model, so that subsequent benchmark runs can skip warmup entirely
extract_warmup_info <- function(bfit) {
    adapt <- lapply(rstan::get_adaptation_info(bfit$fit), strsplit, split = "\\n")
    step_size <- lapply(adapt, function(a) as.numeric(strsplit(a[[1]][[1]], " = ")[[1]][2]))
    inv_metric <- lapply(adapt, function(a) as.numeric(strsplit(sub("^# ", "", a[[1]][[3]]), ", ")[[1]]))
    list(step_size = step_size, inv_metric = inv_metric)
}

# pull out a single posterior draw from the output of rstan::extract,
# keeping all parameter dimensions intact
extract_draw <- function(sims, draw) lapply(sims, abind::asub, idx = draw, dim = 1, drop = FALSE)

Munging of slowdown with chunking data

scaling_chunking <- merge(scaling_chunking, chunking_bench, by = "grainsize")

# the run with a single chunk serves as the reference for each grainsize
single_chunk <- transform(
    subset(scaling_chunking, chunks == 1),
    num_leapfrog_single = num_leapfrog, num_leapfrog = NULL,
    runtime_single = runtime, runtime = NULL,
    grainsize = NULL, chunks = NULL
)

# express each runtime relative to the single-chunk reference runtime
scaling_chunking <- transform(
    merge(scaling_chunking, single_chunk),
    slowdown = runtime / runtime_single,
    iter = factor(iter),
    runtime_single = NULL
)

ref <- transform(ref, iter=factor(iter))
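
The munged data could then be visualised along the lines of the following sketch; this is not the exact plot of the vignette, ggplot2 is assumed to be available, and the axis labels are ours.

library(ggplot2)
# relative slowdown versus the number of partial-sum chunks,
# one line per number of sampling iterations
ggplot(scaling_chunking, aes(x = chunks, y = slowdown, colour = iter)) +
    geom_line() +
    geom_point() +
    scale_x_log10(breaks = unique(scaling_chunking$chunks)) +
    labs(x = "number of chunks", y = "relative slowdown")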
z%zrp aS㦌p=GN(hfdTRL89gϝ9^7cמ}`=ݼf.RJݦFF m7ڸ~ff(kYD7GY*.3A'q}>1xOpߗMsY/~%eܴWm&-~qקRj=M-&Qdj5R-4ʗJyPۨ7vj9G^VRU+jʓKyQR5|j-Z@ QKuj$DSє3bg)JL\(PXʕQ(zzO "1SZAsk/|[ѕx=s͜ڐ1yF92)ʨQ?oR6}lx11]j@GR1yYe-4 |!s?tS^{j LTO=u:|jo m(ףWסiX gb`腏SE^ z4,,N->UDІ%Rqug6$ڴ;*><v>Z& UAMV7mt <4Kpa &fcʍk.$r{l_-+<݀_HYmpbctX\|W_NId&hxrU{vhC>/דA2؅nC"٣-6R_Aalδx19nݕMv/?na Յ凉wVXƱf#y+#_e77u3Jؑ& b~_r4U[(% []b D`&ؐ;/jqIVM[Ҿ(Dѵkv׎랥{~ڧ􆜭(ǡѕaF.Yo0gE뒳'06.ǞNժVC"p`埓˸ܢ2v|MFWˏntȦqZYfL$t ;Lh:ߵ$^6oE*]: ޸c , ESjI=d*EBpxS`;cp4 Ѧ13vW1.o@KjۉHCm6}bh62Ce&AXawBJZ?7^@Lѭ 9%^"é E"V@{keӻ\u^hwFЬj4˅⋚Kf}A(ᅭ@LƖQ,q,Y0g9C3+]"l)YbWHWqDtvB <&P% m9IOH鏍o>'em I9oi+rGsM} fg/{__$%I»ȢPqz >!ʢNpƅ H'9QϫLC'D{S5|Cfkrl:ӷ 0'At$ 1.D,v WnRҨҿ¸+Ɠ`7rMxlќۑ]S]H38&_Wh1M6a+'rO63{X鱬 v?q { u-1ׁ^4s ڎǠF4݅zѱh}1Bٓ9+KzOd!>y}4 ߐw ;EB|^C6&[&?&2I+LhI+꣯M'z<0@v8a$ab[a.RmPlYGy=TNh]چ}g>V- @ۈN?hI0LŸ-H%g8"oaSݦ13!湲S-H-9&'Pk)$pj )e1nQ MʌVw§3#GF=M@ƆP+u iFZԎe0_..Q r%d)5A5$aKL ;,RJ_D΀Ç?K{ eE-*U4/t"[/$pe:$c'MԒ-T.:XiV,QW \;&}7/ pujLJ:Wgu ωl ( @ I:Q ק G[Zz)c&f7)-᫏/Exgi1Y;o=/a-q$ɼV՚ɪCw@V >RMXTU&(ԛmstG}%' 𭛔a QhÈs$`'#_OFbWA׷@ĸve8 b2&=mtN| +kk\Q~=?nIZ h ד*8}|ttSx jO6ʿTAւ[pZ&b]/hB[lɶ_]ؽ|=Zlh1qb3UnE'5 w[ӿNO'Q^Bo|s6m kGlpn-᜽sbCujfo fO秆t W.OLkA\]U:E꼿Cςp=UWS"w+3 T9)ZE"}o9PC°nZ]N[q{׫DűsMCQvvTP[@aGқf>j>Ű:k9qihAvz c)0V{D_V^PbkdQj]Rs e^O@`?wq.hݟV7!PɹKïv7]@ m[m3L*bcpD.Cge(!>܁)˅O"{3G HcO0;u3_%64$UE_DgFj"ꪺN?_bm²ZHl0Pt{2n_Ah{QǮ Y3ò-ꊞv q==S,d$sWn"9Fq/_|x/!2^nNYNiNYhOaU`.*ji?'(ݾh'~kOdr~S#;_6#=):!?|:#žr-! 1 Q{odFr4{藞x4qRarϵ(j6 g[*j;]ս"VI/t i6 ;d$7w__lM'~;ُ% ;]y!H*9<|XSG9heLwK(0|OMrs .8_&>A8FN_AjiAm1e! U^ I{wԝ"$ w=Z 8KKl7@]2n2#H Pճ .n_B??uZr~N櫉dцh;4q$l)RT񎔵DgssNn'KzV3qEVP i3%fR + QZ! Uu:%k +8m)輸 6ý׼:c~ӥYʨESx%OYiӕ3r4J@{lᓌO\Xw^BRY_5 =˳ }ǽə >16fM!"#=u/]fCdImE]ziTVJ&qW9V{j"VhSkh$38žYuKGȋ 4hp)0R4a6d>HF5Q块7 kۈMA 'OOYM)H/ "rOˆ:/"-\Cs 8y+ oj'~[> ׄjcȾ!7;/'z6'ٺ nJ-+}kCb< ^ */_\Q,A>F/x -<ݝ.qnGt](@x&LXbFg+_{4E/J!S2o}%.˂ mRArT8h%g)Ɇ>@iZ\ Yk0K"|4ۄ.4MAypS/)=|:ҊFJ[=g̛ziu6}/ޒDȐ7+:ɲt̘RM1p58"nUd G.;#Ĉ~F?b78\p$nôBN=,6~E@p`%}n|xjً_Vso@۬ՁuRyוr tUj>-LX:NhS5av8'\DۭΟ;KZ;! y _vYk?%K+5!Uu 9Eȁ]݆D5?C7[f6I"&c 5)1DƤhBSހOH"InsJ%2mU upd,ʬ(H/L ($ǥktY/rɻJ|y~ZIB撺Ҫ*}YO]s_wtԓ"'YJbjUD$YR|SeRuHYhI n-z!HbC6QwPh*lQ\ӺIԃ`jW| !άL\i2Z FH9 ŋֿFڼ3&9W_al$JWW5m9rǠ.4 -$֬$,&*.L_I{[w ZVcuľRcSӈc#}8! Io:oOkL59E.ۇWe!1Z[ D[zTJxrR!U,76D y\9_]/zV|ͦT_ɂi,(ʯO7-Mpc8L[Hcۍh$;]6\s۝3A8}Jf|/&S2b> `Sgb]k'+tԁO/-GC  ؟ӥ@Bn^B.AJ$c!j!4*R<"z= gJOãfLNQPvbh2K,&%jZ.Iht+x}.Vq,=^/UUxj uM=yrÏyيbjg7y[}ϵfM۪Pɵiڸ„z LハQ LaQo-ˌc S8S@Pg64$Lᅫh:ޞ5 _@KL9\7쏼>o;v crO(־+c ߋ?i 8,R:&&g)*[iT/`B MrF@Aw>{ϊ oFtv wO[ڸc n0HZBzJ}NjTBY9AyɣUɢ7f2*.u~li? y H"/ҐwtOz/UQr&Rs/ J8*rE^z. 
O]Fyr:@F͒)}\:%CK3)*tpʜ,3):j;^|/"(LKpԋЩh/v/"_ZgI!v/q@utthOa9|e.>rf+ 3%3㴉`O_ƞr`$3T^B*!Shg1 7sYu,> 9yY$"'`n76KWxS 0u`T"3-7֡a9| vĔ-3VLÇwgf[N-GP 7g|endstream endobj 346 0 obj << /Filter /FlateDecode /Length 5514 >> stream x\Yqsއƾ%MQ̓ICaa֪=jZ6kGX$#3kCyFőeU EuyÅo/ÿwWjlF\^].RjS*xUl]+s%]YFiUQb=F;WеZ)}mgYp:_aEJW$k[ZeaUW¬zZ\)a$%LYզ %Ej-`Ÿ_G!6 f.+iXY 8,V6׍'umq>q=hj'`vO5ChnEq7oo# aڦqgovi c7opط9S]HҊR:GGsͻ7NmؾņMy7I{zkn8W<֍ThW>q~3nG(8фvgcb N50 ^w~%nc,GLl3D;%GcaMqr BJ'omx|'Y|ꆇi`:YzMq1wkD ft^(sC4JGHa܇+>xb ?Xi O2<@zmgm+2p7yQPy)mF!⫴C-9;̏ݒQSnf" *]t߄=*Utw=S+Ӌ]˥{L4EE, D%upbtNkmkaKuYW65قGzێ=3m{?0p^l2N/V+0 Ǥ} g֌zπD N-c Kh~O>ÊRl4OPԉl[6Q> 3ߣ 76Hc :вH'\$pB\9T8lnwqZ%Lt.DtinAMci=+Hwjџ& T6.Ic25.vF d<#} &%_sЋ\0ѱP@TMly^94E"us9ke@ mɨvg#1#@h?~cYo@5H݆mOmgUg.`ǩJYK^ik`<(kIVk&I jgD[5e 5CY8_< q~K$Q;:aOq[mIw-4\@pQ1Tߑs@ts6jrP:zAD4ZH@͛Fa(ױ NF蠟|U]%늚:Y쀦";.kM-OmmbUчxc6 袰9$| NѣrdO~uPU]՜P,VdėZK8TRm/.uyi%08-Tg rJ83{(ZnxA.rT3e—$WYNײ4vdϹ5%rdLV7/M f llQ>Ƿ> EezM˅lq^QW7Lx RE_*]Օ$:B6ڇEX E% Mg&=K' S>ND.miL6vKZ51sj{"i)"$%n*?&9DPQ5ginq=wG ¿o1Srh^Bbye>JT'(/˜IDKl+d-|aeDR]L'iBfD̆; wqv#9fDG`mN@tM F$9orIl=JG,,\{*1#sJb1C<egb K I`(s_1c׳W0t* TiU8SطaV-=уѹ%hFVGmOuZؑ h`yS4:/-3,hbĮn =,S&;25u0~<C |!aIjާƗaY^4d3<Ц0 D toerG/Zd}͹r1tv aY9)y2[0pK%R:~!PKӞ)VBwHuAN:Xjw t~;b29EI˪eLOL\4Y,Ws’ cX#h aэGe/Qr5ibUdzue6>`>.eѯ, PyqZK!dsp[dJ#32_|wH K(NXsFpܱlpv,xGwˬ~~~H~)4jnvLQj@>M>˓ M[5~Cݎxgփ$?|vcH]Jv"Lx:ngdBvZTmU 8|س?E; gJW|'nٸ_ȁU4Woʮ)dC+C2_|eTU>Ű9L&*N;Xe|s,LHՔv{|_\X!//GA^Nڹ96o+%wTb-;ϸ-Aq*y jp;_PMq@ЯsDM54e\{** ȧX7F/SVI n5I_NUŬ0a^HZ6LH|Miqv'muECr;UG[K4ì<iP.~Vw NN=ڼ'c)"Ԟ=2+Z7S磡q{}p2^e6rYaX3#|F~&9ؔ1Y5c*Rݘ)zg_!6T` F88EZpCEa?VTII#l#}͐b35tBn8uΣe=3`²صqoۘGc?{0KnPA=.7Yw 9;E$u$J/eCv 4v-1ier(hv- s( TwOů>~hQy3Tɢ&5)#Q _ф,Ę740q1ӺCiQSGA<]륀>Ohx }FDLjyY$\ U1:镩 XVGV)ajr˧i-4݇:lzxZs+#1_?xCk4joQЦXHf vq j& }0~ڇ&ڽ\@ Hf$\a7I,g^TS8'ꬊ c(l⡟AGV^ƲI?X=qKQ|6**dE#Ñ=M{\VAK9F } R*2'7Srp]Z(Y© S[\5zTW'~OC~_xggp S~nQB^j{+H&k}0M46x{[b+6I %͇q@ [^g?~& cv|,>eوH;0s^azS> stream xcd`ab`dd M3 JM/I, f!C N 'faa9S{( ̌E% Fƺ@R!RIO+19;8;S!1/EKWO/(>јAe5|֟]~M_zw| ?w|7[KOq[oߜF k/]6wݪҹr?ͱt CwHz+Æʂ&Ʈis䎲=83ک@e8Mv/X[( *Q;Sw^+[ym-ýwRO<<z7O;ݥaendstream endobj 348 0 obj << /Filter /FlateDecode /Length 2121 >> stream xX͏۸{0۱a7MEqCh[3V<̡y}$Exv-9$>~|a 2onÌf?z5NrRh|u;GȜrQ0XRBj?/hY`2.``{%~adYW![-$1jD,$j;Cj~̗LG4*DIe`޸(UIPNp3^`6]!]hHz"1DbAMb4ާ~=ևb)(/wd̶7n }Qo8mSTۓv5f +4Ug1D1nfhtոi̺jpR߇%6}y\Fj`3B .D9FZCBM}_M=E9ʍ*T]D]@rkl=908 &i  xDk800OR²TV]vpT8Qj%#K)1/^+_ 8Zl5d %ȤP\^K"gq3k9& f1=*YdZHK|@Z*zt )C >gԉ*W;I&$#h`R`5Ma%HĴ9^_g&b.˟y.ܤK HN`HfAm)i5jLO^7HV, \`+T1 .ШE0W.Aj\ep cIx]מz\7CL&s3,G81"g3)6^pCyT@ҝ&@)2Sg4lKnf_,-sjQV(D Sgp>B,mHzQ,!ʄp!<63#J(3Bpa^xJiGIe(zP)a+$O7((d0vzތa exV=) |֘p4&GonGl(uA&!)- ^ 34nƥ[<ȖUBص`!lTCv mۨo 6g_NY5ڷDLlcIz(m6K.EҮ(?\0`jx4!25NI WI A6|8q#ދ1_wݎ;Gڬ 4VO,LΆ\>u~/B_K1;x/*~jm=|nhM ߌ]h(͓O:Ƨ+yΐ bgV+gV0R=vŎOᶁt04]l8FƬF?Au1fT.]/y7],,sԟ71nRܷΩ~•ntr )onj̫"D:NYP)I֜M7~=i ʖa"$u_u3MxKS<ٙ'fybO`NjW;]=E}0 |Yv0ޱeGQ- pw|Ns O)G߷G j]6D`cYCw2Do܍&~Yqz~ vfSU&;@7l-[4laz܈VsYHj$cD\bei(qݔgMtT@.?gШ:3Nb;&J㋲RpPggJ)S-w۬(Xm[;_7W^Y 1i]L~uFgiMp.Qpۙ䧾>ܝy/7boʳ,y 1 T'?}lfLӶZ+ӟURK'dИb*eۨ NL} öc;T6ͥo -_ E4j`*:67I/?q 2Y=W]uDkrae} s,endstream endobj 349 0 obj << /Filter /FlateDecode /Length 7979 >> stream x\]o$u}_H %p+Yud! 
bÆc<[lهs%lIlblCVח]N}}s1]I^\C@vw݅^.1o/_\lՋ?oӼ7_nմbnZ6\)nv]k~(yxsu.sN ӕ/ira|Ǩ0SCwiqKL82q^?9\wu۸ kuWoݟ>\ůh˻eTxs>lӒ/C gV-nA7ǒͲK7;Y_C21OtLG&~6aJO˒74Oisp{{a tc'__l^ݾٟwWo߽{trn+w&y/1`w[۹ ]n.o/b&$s.msA fmΗu- f--3 sl–ɸE` Ƒ팆e;iY=lS:{E^` ӴiA–2'.-iu`Ol B:; <3Eȸ새|y0m4H!)l&' Cm̖ղ[`m-.-Kȵa 2 $X MV;c4 4mыW&a[%m9Jq!3زD( Ɠ6Z9% Dmz>~؂hD\%/[`"#cFm<5m؂%qX.M3/^I)l蕄j'E0M 1vel) шyMƄ>wz,Xe<"} 6ڂ9ܺ0e[xkFyH,AKZ8YR!!e~D245˂(K]V,N@'.Xܬ}P r&F,AA=YZ.ੁ$G$TA a^IH X&DiY6f.4Il$nj]E{Bhz"|^ philA0RqGDg^EiAhH0)LL6qA༵8D#*@ HL҂PI30W!<46*&)F,3t0@b5Ek3ؚAZu'ٮ3O,A!2*Xx%J'$),5M(%tV 2L:$BǠ슥}ũLLK=)(wpoeBbbmQ3Ԗ`5nq7R~=|ڥB<P*Z*XRVMV#kIHIZ۬n%6@?_Z5»[Z 7XEԺjN=?Z[ D)VaD$RtDĂ *O2Xʍ)#ԉ#7oUr147LoFi0U2Hr-⼸gSBq $T @ 3U]YkDyu6ƥ\^sV'+'a3X1ɘ 7+au3V3Aۛdb$`ozU䘤!Ǒ4Ճ̨ #H3'8UVX,=PC6E5!k0A÷7O^]%^apk`OfæQ4?@*fdd+ jL.nAڸ0GM@*(!!Jk&C 5ajVtA i(XM>#ph.T" @od @_kLLư VsƧ Q` Pǔ8DꪈfˎoCTtF=R~GGj%,P-/(`f&] &-5 ug@L$J<@Ro&ckQ$ZM`Ci:8F_ җQ/dJ*ΓZ+/ʀeNFN F<-UNRq,M/RW| mI`UVVYR< !Y)ɲ\NI. z$YWfW-LO/)Ul8 ` $сSMN<d0tD>zz@`F]&jM:ȔTT)tSX>FKSaz)5brR-ij. <à}vJXh-$3q+Cgr+E5=ڷZ@/h$f? p%1:{/ \ģG e 3^ظMoZ)IH(&k`XrX*֤-\AE1"lhvְKZmQ9k5VY_ݬ-UdjK߫bګtjZKŁPfpQK | _‰2?Q=Zg93[P~{V,ȻǷ{ .wy; 'EM뻇SDG`2YQ@9-hs>;M48aIDAƋP$DL*'b!7W0T{:L/yinI"AI<'XB͞D\$;#eX;i6{3%c3'2LZ z2-5R[2-57Y=([Ŷg [ш[@qH&My#t(S#,-YNV%bmݬI"\]&}~ay I.p1Ya^`7 y$e¢LVwj_9fwOzU`^I-H3i-^( 6&j*h;Y>O"8#RC&H-&8Db^l?0YI{@s$b^Lɋ UVk6ܷrMUV@eUUҸ'WUYڅ2+R*lJt.zYS^N3MSRJKC4I SOHC4%{іb1%)NY.HIJ; FdQf:1K&()Jg+l)t.oa|F4f;i!n%xy2.D<¸ҋmQIl{H ?49U(JK txBF9.F> Hs1]tVeѫ Ȏ%y "ZEWXj2qp>ur;\d.S6 ^*%C BLtTKnkpHށxlxm`KES8[i[oz`5Ed;YJcP&̓Т ֌ʱ-Vπn^^ U ȥR"uƆx 8t(v-–! ؚ=>JSMA[)d3qq_z@`8CCv0]qcML a~8faqEcrr  oa3ɓQ>"Who_ġ$s sj^wi6ֺš6 3j5e(,=iK m97SoϽS5`m1nJ:7v1+ë[+E3ҥ1zZ3OlC2=r2/RFb$uE w;#Q$iF#`+>]dDRq)MNP;z;Rt b5s-I\c뤾j#ƓT3A=# FjA.Ty}62&AǢdJF(1)+JcMSDeVtdM I+pb R&)9 MSsSqq*d{ф弱~vuN `˖P\NuXtso-iFUSKsd+Cap0ܠ5r{,„t$´>,v}S}TRiY To`()M0ʤJCBLnHz e >P#+]n +D++<$HCj*uMLG!U9K3oc;_;#7jDY:xr5ԔrTFג.a-k*h-}vVGRcj xgbi=aZ=3DwbJ@PlqUae1hLG="Pkԧ >WU4TU|oxS|OsŐKv[Bm+;)v]/k{)E=h氖e-O!9Y:h 3 䢶bv-kD*N5N-Z<@vQď9EڈApQ= O>|} 0G@dsS2oHnO`J^Nj͡ꇎH$J|JTFrz|41񉞙;5sn-^պ^%.IֱE*gɅϳĖ>>drSXh7k>hefJqB5n- w7X#*$9Tao,:hX̅~?vlo|滉~w;kou.f4xżxӏ{Wߴs_^\_w3G9p={/6ڛo6 ho_P@_^e^ 5.\OI.*6"l(Zw[BuaW[$A|]/k!QcC͛x)?%wFo>T$oN>f^ e,~spktgOyF͝ SǵU[}_7{|pXjy``X|U~|cuw׿ۺ]:E}O~DY2y&+2oښ/6;$ 𺻱NfQ˱%LNu:3 nĩ.!'F\ đә~x.*n8 |Td'bWyߒDP_2~m74"Tlx ; C/y\{>yY_ifwzkRvc+/;Ӳ>|g ͛b%H#|WS8{Xo7%͹rhWnD"@YZ|ů/^mRy2zx> @"#" =jTp]|b@RnygPp iŇ /7u|/?)awVǻW^ퟮ}nVOu40#쾘|?`ŏzGF~?a,d$'nvtn0uۻS6دJ dvs鬩deT 7]lvQ|JХ&z'Q*d@t vBear9@*, iuג6z8<; uΚb>7+\r>%q-FjzA`3B95 uFOold+quߊ{#t]iTEW-y,:\J7\=7*p䝿9eGWQtCLQxZf%Y~4¼>E9S/O-\x$k]~?o};ݬ?޲)yϩЩxNM`Ws yāB83sZq[|F5}}N~D M;Te7fy_+hwV7)ԟ&?nKKw~K;* ,4gR{ ׭R_02ukA!,kmqQԛu0OrC ?e]#\ݮQtN$f"LK/>goǾ#?s% . Cnһۧ/[ ȔF_x>Ҍh(#s8Q}hApNl>f(1 GDzXC=j:g*\:.L9ٙx" 3>.RlNj#ǛT43Vc9zbH[ED۟ij3;O\y?@ruJpƤŃU\z)#=>>G& Ռ #խƑ&Je?Oqsj%"$֍-6Î+ʪ2`q2zl~Zsշ/frgȨq[@T6:fS?ֱvM#0@2$5ӄ:FnkB#z[͙ZK;> ,È,0J#$xB0Տ#o9$_Յg}S\>GZ dPTḫߕDlhrշqdZY{a?F..ݚ Afo'hj"pp$ڔ͛igڐ5f D67/7޽ck-'VNl OU~FN߾X׻ut#BB|$I .ծYnSow_o@oi%l861g:W%( g8 3YGe.E@GnO |9E$5(ː=OIP{1#mU*z--<ot7d1~t̖Y ]?ɺć˼ p~,{?i!B-[9ͣóQ/Q21>KocȗPLmu哓?id 3T["`9Зe[kmWǃFY~-ȍf};V*9;E=/:59fq evx*0d8;?[$-VoF. Gy9=~ W<¨ayͫЕI'r iBȏFd#ϙXOtqsAԖ]qv<ڻ_'z_>hE ?2).` \Mx( jyZVJ==u9wc|.a]GQendstream endobj 350 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3567 >> stream xViTS!Q nm9U[GZ8T)2aB@I B0d $@AA2Zjzmf};uZ]ɏ$,cX~Q|p.}EhC(nrݎ:` lk?X >K0[kO}>[~A|ZbTDdV-\\yFEĹa>$b|^\g?L$t}Շ!=cI >!Q$J>v/"2* ܶl|/\wv¼1X5cC(v LlsĜe-V`,sVb0{l5'f묯Y6Kl*l^==6ٵrr~}q (!L[_-س`|B7 aC"݋P54ylϲ/.NN\:؆וb{B<\},SKH\ + 0 2Llܧ"/ 5J qIbm) Œ` iq,#(`V3x::D+G\᫵:RykGsȾ ƞdJ0,_n=2w!>-⛎ghnSyPuOYC-SjtG*%ᒃ? 
P32?92/Ek A ԻHɽS?D#SC{=);pڧٚ|^jfx %69n@l-؍5i9D:Ue _29 9,QZtf,%)1w=.яZr95Yvr0p+KWͅ*y) )Sh2&DQUyoSjCM !CP"ˬ;^wM`j9#;GU% prSXt{JĺEvM⊑[9G_r ,ez+JScNͪ'& [OtP#ɥ40)E>L;XX EIr $8%j뽃.S(j:5-:>enfY".1ݦ`o1ڱ`e«(\!5[&Aā@%E|@\iDhjO=dWk:JU0Q5( ǒkT Ws䟢2հXY[]R}0@ ޥ>9OS貙BW@!З:28tP߬c(FǑhϻd4S%D2p0jE^ظ?#:hhXSR3dzhgZV(O" [C&.OpO\L]jڻ#TJ(NDǵ7/wA^Q,= !"A2KQ*u PYSJ <0)ޘS%+Jq.j p $XIbr!Nk}E١Ի\}@`^..U3WzqB=KbbŽu|NW.rhXѡ~v?ʴ%ނ%zp.#w#㓒qu"ss]]SSR]^?fٵˡJ4#ّ(3=Wj˴EHkorLJìYa} @TCv }U퀸\>ru'p_ T3o\ы;) o%' p{iQ&pg O۝"1FQq DހpCӵkT(S\G jYKʹ}#N:a{`󾠮[K~\HxCR<3,4( u? @Kσk TmeK17_Gpڪ{#Zwb_lv>8Mp}4Nz'A/c3gڙwhgO@t_3QhKn'c%={low\rX!{o 9pYsCymkf5=."mCOO%".7h[נUh/!vRe0]цydɲx@=.YHU46hW]C-Wq#dӕ12ZO ?-r{/\{O5c*-舟ޓ=?ʟ&̷;xӵ.|E?vSBendstream endobj 351 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 @vA,ta~ 8ʀ0%:t8K结ϲn(n H0hYhqgy⤼m VDyyUn!tfD]MmL#;/dTgqp 8榹I*`~xS V/nSendstream endobj 352 0 obj << /Filter /FlateDecode /Length 2547 >> stream xYK/^RqV|e)QJcytSY;6ɡC6oO7]IN\:3طSZ)vBדN__ o*K-^\M65lZBM/gXeJI.V7ɌYhEay7SjE]z9aFVR/RHr;㺲԰,&gI;cVpɷLfh7A [K2Nfsn৶O6:&f6j??Ǚ4#~~{-|έjcs*~fsjv;+ς4'UvrvHrـ_ay%iviê= }s!+%ΙܛȢN0o,~Z"+Mmk:}b\rJ h0ڻ ?%иLQ|kv8?.E I1Zh &da™zEW )֜ m^AeW362^W_$-Q ŞkQ!2::)!4ItZ*AQXP%r[ZX#G-ZOV)9A#;a}%4ꊂ{ ̱`MGFRWAFH?AXDgqʀ v3$EOBy>ŦT /,,9.N4 ;"( =\(IjM?l] ݭQ=29 au6T H>ny:Y_!~1vY(P>2)MRx,B/ݬZQ3 BAvE[6R`WNv07^t+]xm`5)u,qn\SɞDk~7$ڢkTkw 5\Jerv=Z.!kL"R`ae^,~uJy.M?6D~}P3{'X խd^|g$ c." %4}\n yui9::2u;!p˜wK8Tw]em)i:ӳ βg&~Ws}3%cSUJ0#zf V@S,~B8)Ə%,}QmJ * qXaZ´|f,Nb߹ @B4L_C`Tk]ⱘyLvgaHPz V*}哉!k}BEI|^ǛpRuIL1M{w \e."Ϟhxdy?A͒oA.~H/yQ܁g15KZ x|Ya<3 UOW>r:ܞ`i!*YЮ~}H>s|@^kW!:.F!AQ ^3yap{0b%1["/ ql,d]u>.$|eB %}f }{%H/)_$wxs{DVcs΍ihgn$;\r/ȅFuQyUb(+-Ld:s31p0Jo' n%# f~%~1vY%lZ?8eIW6 z#k"%ݱwmTvw ?l' }JرP(\Q܄秗>}-4E~`Ο&Ͷ~us<fTp|_07͗A޹0M>kzzgb{yE \WhQ> ļzún10P #?6f}ȐȮ}ĭev-"S;xS)S+W7;G=pf!wa^=NZOL #PA_<q_~}$|qzɋgA)V)Ś` NJ"mΣbo:XTPw'~vwhxS=ṋcBdOϮ*FQPM^4*&:,\)\ T -$wjtF`W)SqZ>0ZOjԊ^bT =& &:S+ΣR7wZwzZ/'%6?a(ΰ( z~ ̍Q10a-a4M !EVOwT8!ſ=tnÊu{6#f| ГsKՕʲ2=#ُLA scSqHN$`0ʲ/(a,̷ӃԹK& endstream endobj 353 0 obj << /Filter /FlateDecode /Length 7849 >> stream x\ێ\u}/$%Q2;!vfQ`-nMu2|y$Y=kYylfl޼{Eϛ/7As^6/>*8*o^}7If;۸y:{Ki:Z +|P7~D&mzylZ/28-oS,t6w ľ"Ƶyvo܌m8 O /r;-yQsڼճ?|b1qR1׻x}>T[Gb̋zKh;_ ~y:ݞve_^Cm; 0cۿ;V,:UA%~7 ZgfהG`h;}>uc:>ȗ:;V}ۦhY7>TQeW % ڒj[:J39_]븎׻{Z7mJ:ZzXZ/+ebn7Cݛ]c:]_c7|ޜUG:eRp޼o֢6"tӻ7QgU1˴pmssɠn8HP./ Χؑ-tcecZ+Nol}_Jʸ䒔_xWJ/" 3hqىd5L@;>iQ3:$(#xxw\agcF<vk`B0q:|꼽?J6W7}|d.-n9 V&8t4nq*4ͅ=?ܞŧ!%^9c\Ծ#Fp(9o$C?^^w?y4P&4ZsFT.G˲daEqiow ۉ;,@Tcۣ 9;?;얝C(m7`%alr'$@SiUo$QY1o(\u VZ ih\%!-%O=\S`OX v$іo-32B$`pnsu !"6>1E]3i}hE !<; A|0>9bt{8<)@Llr'IzPZа>4h B^wyWcy85!{AFOU 礤:9`|a:4t4ѱ.o)rξX) =5DZBⓄqGL6?^&9Z/ `Eiul|~w4ۖ|>x+RX9`rkRkA"jSq6L{3@2n_*o O … \ q{t2iXK=10&G#~u.Ui^KG8nAcp,ޝv=Byd'Cim4n0] t{Ұǁ]v)ͽ{5ݟw]Xi1gGao~%ᓤ0/o8(aĝc[_|g77.,lz-P bՆ|Y7&DWMbLR: zpIҙ;hv I~jk zm2T=JPdT)^E)%He<daX_pqRY#V;˟ۯtR$nAVRܪ^0TřXzu&)5 %8DG+|D]LS_NY Ւyˋ=Ϋ>W7vo\ex,ph0$?.n?,F~rj@z7@lEk=m(h%ي'k:JJJs B݀qCwR<_z",G֔9JwdV[Haw:cu@CÆ:zΌ2XF "$z&Qjvqݹ݈]ʇޞ۳w(s:j˗+JV@,p_aA,$ͻ;iZ<6KJyk 2+Rj?77m@@Ynl2y &9lbg$ ,"=R+!@" <~6\% 7ZHT ^O( Xpk+ "m#$H9fE\ϓu"As6J6/[ ]0bLA#lREFK* TbƋ$&hYZ &Gۙ1J$vZ["SdDbc8 @ VzX#&.T,ZBu+wӔD?9LJ@N,% 0Yv" DDp˄ ̀}_2azT0 :vMX5ЈE] K2Rvb ̠Jƨɬ2kPln]j_Q{ءL5xmfֆsV%cI E׉+(}$TD6o0piYUJxb)@rT6OI4EŘVBP{5c*eƸR)bB 6ä!7Q" t+;]HJK!\h4 VUqSf USU)j%խa䘃U;1.D2%!Qr,u0cj٠,z$#B5"RIZܙFF^bY Xq3K^cT8(*6:8eB0R+-XUw} 0U:j\JJ->k& ҕ:s`3N:3*LDs ;efZpiL%eQaG I3̭j$&ΐV ] =+&k=άMBmo&czҹkdT5W/$(+I,zw]]d,$QJ &#`JxLM %NH6#1>o}hR45:c.:{%kly>ӂ>϶` fbfP}B-ʩIg@}쪛[֮72A491O3YCͶ3w |& ΨI{|S;XF95vWhY^"fhp`dCXf&iQlWQLRIAF륧f+4 `(A!ar0(nxϚN*&dN8@,FvC]bag'pjd66jlX]!kӲw!ë rWΝ[ZreN̢ȭֈM;\!|*|RAIG+Ѕr_#`G \Th-YA4SX@0Z +9@r[JKϖͿ߃ ԲJہ2šg%$Ptyb"4=2 
0?:nօiד@j;"ֆHڤ!!5cCQj$>FH(m5l63?7 ВZ[!8@(@ͦ@[ѥFqphװkq{bl((TmkPkQq倰΅ L>P9 #|.- d[zjnccJueT 2$bE$VxKXY7dk!-ڄ0?:@btVŸRKd ~;X *)V]L?\I]!(FPncW\\*XbQ:X񆲌RdTGGTQG\5o_؟ 66VfW_TfWᢴ@V7s7RW߼T,Ub%A`!GWR%`) (Z=xoViҭDT;^Z6f^vJѪü${"sn~zldY=[XhGm&6]o:g!mCA3ˑ;ϙ!E(S'LnR*Zc_HR[:uPGJ K~\BD= o΍ ԖN=v֒gA]&YK!lH]KTT>,K\+A;^H`4\Gs('@ϻ7@8Q}̓M^! $dxpp,!`cq' HnU d@Ϥ3:jD2!k, bU&(N'BPQǨ25Z8E :ε `t$n1ZݷvBg; 2tKP1JJwSQ,7Y Љv4jkh#+В%kN>K:1NH6f[:mB b iBIERzϺbSXwz0J[-~%Z8H%8]:d8]?af;dDlLNH)x͋ H pL7=TNZ+xؤ#D[;2 }#R9&FF3ZD7H?-b'@2tL. vLGS=p$q&!XrvJGؓ˴SPZXKf}.[:h$`*rH}Վ̾C4\f ht5rÅ/GA-Xyvk2O+ zR;vKK9`{'k}%Swh {;iḜ)xiz$9ih'M]8uԖ_89,C{T+AjU OߌriJoU#J哫.y,y_'\cYVh\n0K;#mBj j5N ]*1Qcd;: ^C&SM K~B6\C4Vac~'O-:*Z!4AsQ |Ӭ`(Dsm1x`}<9bX%iXюƝ/#n%ZIMW 2d-Gĩ+JRidIsZn9X2O8 tOyG \>W)b_s -'pk" yX Ǯqm8abL j?qpՏVܳ=~,< =)twH^ifԲ('0;Mr :H\3W}˽Uz~x:Z;Z_ Ƨ\~K`iI'BWoyE:i}24ixG.(9=#wKA-Iךv7k1#rV q[xtʫ Tʁӧ'XoOJnM=u><Տ0hdՇ{A$-:$ۧR{Rg}wPp9wS/}{sF<>x-GBjqF+8nG^ё N[1<] wـ7|qigNXk\#⧏Otp=rA!m^}L=4Kc9@yi"jfz~=W}?H*]Te׀>G}_Oj~6Os+IMO8UW_'- d3[2> stream xK[v A.4}w= +O~$w??-e_f}޿yY)av|R4;Gq[WOuq?;wBm9xG},Ǿ3_W}jW_s_N_CW4^'O򡖻V|wi]Zx(˻{$ӭߟOՓ!}jeO=ujm-߽|#a==/e3 R 6鉞›IxȋOF:f|Ss|xGkψJ#s%zW4xH}ڭaRе*~$Yc_;lt Z_:*溿]ǹY7KK:p#4 p$w3 ͊Qrd;̾60!_[cJNj#J|;sϼ6>w\~i^43ѝcd 4fdՔ~k2d02W j</}F]oFL¯1<~u]"U DV20g?=/f疣 n-W+:5tc`R#'o[L\j[ITkkOދ푽 'p}bzc`BoZ lG=ޯ_ymrҿ_=2}z3>1{s~}>*F z:Nړ1XeRi?\[:Mg?L3oD#].޷p3-h KAhVR{${NlvVh#~Ҥ} Ǟ+ٱgY[.^tcK}7yC"Qy]P4A-u/~p C_O4!^(-H'bfaڷ+9Ao[~2Ir7ܷ ,# JX( =Ys(eSt&J ptcd=ℰ^ U4 =L' MSoy?! CUfݷڻ# =d"սw!GL~ߎ97sde"@0YwlјWa9D푅)/䗗gt[$Y v{f C Jq~q0[ aIBh;~n9f/Dx CYx KmQ ѝSu@<1[]N<&kOz)6͡Ee4PXax U)8=u`:bk1|0k>C fG@)NE2hA-)-Tו`{rJ`Ϲ5n2B]e}{H8Խh2sld7%#F;*Ka3&Lho@hICi F}Yh(NdE7^a:1Pt^P˽ 2FŁZJ jyNW7PRXwm?fqR^5JXLeZ  4W@E_z&=.VOwaӢ7HY>E%.@;W/G[L˽,#,E^Qe%AVd.Eh_qɠ [NF!.%x-{(Dr dҶc8ed3W vN(V:|,F+#2_\8cp26*,dΰ+{:\E0hLu!*c,EcX~I < `ʾ5zbpc-M,A L5&h{Hs;E(w`7AvG/!"Pi/vAA<@@YFxj $MI55cZ:%jP&!름KGzQ§`'hk#CDQPY042i殰 Hn$[ܑ&H]ArC K"+#{I"vCY2#Ƽ)䘡>W‰yiW98Y8=('Gb!.%B݈`!#(Z&'uH, C2'^ (mIW'}A0eգA+%YBޙG4MIi#Y)V1`6-wlj#Q87=P܉<p@Y3PbY5fHqYGbzbAJM6CL˳^F<|ᝎ F_fm _}WOcVFck8W0a/yIQ˃c3a HuKzJk1KDq.<2_8BL#U_țL` \I$J(O0}h/Y*8`j&2V Cj)"bjR>C-N󕐵GNE-eQƘ&N[HʘMwSB(R}SHP tq@B[wq B`Q5a2Y4A*<(N@B*1OZEHK^y3s)6Gmb/'L:M@#BXȐ\S&:))iɰV Ew9"FrEAţ)TĈ$h *CEL3:=bًJ JZJS `T8DzBCx O5$pYv!RS:7w: E:O kJ peQ8Ŀ[NrSgࢠ ~- od71b\2YX!zXk|/>=},AJC4v5X ܔ^X¹W+8F(XeVNyGi9H#?˾9P? '~IuacUh(㐳ǪEi.0+ƍ.h0Axъ@#`z N9!& E)Ɓ%UdOOpEF͞ /JThȹ)Efs`da0q4߁!OBZL44p,fυDBsa=X鄝Μ>l2.j$ :8..(sᑣ@45rQOkp˸"A)P@ҋ3$h 3?@&1mi5 tՇ dK.?܊|@TH>񤬗À`NV=7RkDF)ؽ)~{8S$ijBȎB*%YAt4CR<#*#YH1e%`av)4n!RSOriHڴ^ؘHaR=YRTr1)f .L 9$pt]j҂U *b.I=em{|QcPĈT b$:,1R!(j!J`ר@(+@(FfH J*3lPPZFzL!,NF*@(^A8Q,%ܝRmKa Ru# 2C 54mSXA%G*#qZZ+LܓLY<\ѵ?Z=ٲ  W J=֐uBx,l>yv5`i!劧YġV 4{I_]9Qꂇ8l?ߩ'rdE,(lOwUX;{G<m ]A[Dکԑks\8W.ÁR֕9Bi`E"BQ3/9?*5\^*4J9{9+!sB2԰y.]FUB-e 4OnnB$JN Ifs$f蛑b%@ )p7F1ȸ>Hp"2hWFgmFN4Mj(#oI"iK˨ !"$/v:[#HF Ĕ:/J0l%J~f"[nIIb3Yʏ6c[4B3l7h܌`@P~1=%iZ%i{_FnDH##sϙs'vl}KٚS;5FFH9=oPI?RA$(P#\AiDkH'*DҰKH'E){ҬO+:6TH _ %i$"2m+J-(nUZTx*yҐG94Jdc(${{KZ Bt}Cٷ=KXޗj&Bt) B(._5h@߶HA:HP}Mۆ9md:MU%hBD t}t}qH]Ғ!#9{L Oי' uaD?_QփS8  A!? 
CH&΀j "C4G_雏.?$v6W('$xAGHtHj,f(E,A0~ߟińAmFzd+ɧe؎s}8YBj|p&xyǑ}jx,*F#S#bx G5$}Qۍ 3*F rD@'ȡo1H `G`No%he{  <kfs])yo8ͥb<_b{q<^|x\lw, ; .).L̇3k:fgil%.Vm?8gJ!r$ c^;VGN S~@?ߌlAm@l>ywGˁhc|m!IN(VXZe)t=UW6CE۵!>B9[ htM!,'f{uiA|Cݘ[Ѭ ÙCv!#LA QoMp)ᷦ}Я*7 6ķ(7-Nd:J3osb^ZV 7ͦ5ė7x:!#?|-o~A#VY9C[&ޭ~~F++ڿkjڽ;C!`?/tCyY@m:m%]C];kJA`d™ #$|^m|$MVijȚi1a+EB9sz\ HƢk|φ499{0!8dt$2ߨ/q=GX1#qN<B*ҔfLFjP_*z2gB/LWH&W('u62hjug>oQcms* g3 иCA;,ݩ ` )M@A|S@tf[~hN0mogf Ld^1 `֌zg9)bĠ_%E &HMM@&Kڐ29Y{om6Xr$ '[y^O, :ڈԹ# <9Fx5Q2xt'׊+8Aw{(.yw`{5N 96${9$ћIP/.6@+Bz\jkঢq삺[룖 z@ $/_glu5 SHu!٪&W X-_Z: @Z- o6GKjћ|(9c8W5:dt^ޫkFܨR(F"A˚^m/)1d _ ?F}0,7118pM]ftbұ Ʒֱ:F2c,mUVԊG1$jAc%=ZKX{Xkńpc*3;BgZȾwq<@l jcǥ=fl 6Fb Fz#`Ӟ8gVW/ƠL,X!#< ^ 7aC@ wx[ҿV {fP`$fQ@5@Ю(Lr1(h24Y!ۃAF%nb1[rj? fzΫ{@V{:4pD={=1J1P ,[ ☎:j#ŀ_E^EQ5xbEZh@gM3Ҕ(6n0K%ȱ(-=;GahC8jB^e4NF FSa= 5;.˦kIF]qa\C}A#{r^-*-3z Ó}^-]NGLT6Aͨ_ilWqq_M(UEEb opyb4$nny`Cyc*ݯ䷋7!$P&mI7i䈶F$6ZjϿC{%(_M|of!rDR|#zN oq0<ɑ帟?28PZ2P~OMJ勅%M@1bl8O |/`  p@ߝph2ЪcVK 6fCk֤fC lhqY yIo43Z6hh&h@j5;.+iY3^Ɏ̆;иuϱ3ޞadS 2b5,fe#ZTn f5'AеʹY ]{հ)9#Q3T4/ aiØ_N ~}5Da[̄0maͳYcêC p:-Un P tLڏ '0m@u|{)ˁRd:Pwl>B$ .Ύ.<3,h&%ٖ@`5P'S 85/F(Ί)wuXhv̉ӽy1=̋վ@3I">%=oЋ}1uUdt>[Y[NŸXYgn|hPţy1.)+Wndٔ 1̀wcq4ϻ 9Kb6@_2=$Bb=ZאX"'K+VmEjJ&W(K ϳ%ݑd.y/q퉾r#z5%Mq7~p AJ҄̀^m=E*P@KNJ?eTӁLtK!q{49WKZj PC/ X -һ1ryP$vxj(7k\t3I:nbC*@F([*0hXj' /!Eǡ MDY}i([@jKGxߍNk! Wc qdf-P`AO fz bfZBځPmvW(VletY %X-lz'@|FXA]Tֱ@lnvoF!b)@rNf6'pdve"|jW;kIի4L>.vfʒ\hVt1VGFȰW;alf>&qC4;gX`+hPY;m-!:oY-N@FGDuyXgCaúȅ{|qz Bb'$[섰AyV JH"5bYI\A]r<_+g2yL(M ]4p`3Gb8V'F&w6XoTKa]͂%у´lO34SZ-ۊVGnVAoZ(Y]lIXhnTCz !n\bݮt M4s78F\+PAjVJ5BblV =#$ƨ )9[͑ h&3ڗQiCjϡ'+Iγ#Cb@9s#j~NAGr9 |Po߆EN@ic?ej Lfc4 j ϭ ' +P1\bǾ4kfT1S#݀k!1f: 5fm~0t/Ȏrx#wP9@j4M6C & !iƦMԪj*g6>j4mѣv@v<'S6b` F=+!FA !K!Pv?f8@hڜ {f4dM_ٵ?aġN,O#1eځ!,2@\#o܏J4g݊'5f:`Y+,9nLBCK)tٔfyYkǰ%6wcNh= ޻Vq\iQ@ KadL`oFŔi_'OY;4^q03$@w;4.JOsM{ 4jZ ~Yq = bQ&1Cʼ3zqd䎀b < 5a.5JD=8u+8Ӂ˶ a~&\ƅ ; edIXb tG,GLyj0B5sJ7Mxp\(5hŽKq}`> uؤdVxYӤ:3sv(njZ+* 4ot"-hd'OB؍=PEFJ(#&Ncp qP}5|O><|+KA=97j)@y+ӟlG 鐷R(I;~tFSCJ][Io*ѯT+?6a!2>|+0>܉Afa|l/CUϾ ݥvcsWmU?WNsC>Hc5tyN;aؿG}҅H(]*%=׵tL|\{(寤U|PF1??Qʧ`Ɲ؏~_W^FRB>J HJ@>uL ZTPQ+xψB> pq7(H Xǻa|/+wh*W~9GFI)0>& 㯬Wpn&= ܵ=R%r U4{H~%ɲyDV0>a#}e|X+~!Ue7CUOV/ 湝Pi? ?W@o|BAF A zh>{ƞ.~1T1Tֳw{Bޅǿw=<{+K=xDZ5n-"4ccb1ecBD}s50`HLA[?}{O=nl?zRh6G6O1x򾴎9ӷШ t@y3Co4A\ȵwo%}nr B-!m<3S+'}g?\?EMdGJR[=]/lm\FK:f+~ÁϾorlAݍj -\tc/FlFAki9 gPV6_ & X𯔵E \JoMG=:CDZQ_Umf Ljem$ڐ4G/]Kv x`ߵ}| >o[R(`d߭Ր=VJh/NK1M!jx%WkdRS+$cZaH %&dvȾeTѾI'W=^vD&٘ψ=@=1oSwRItl |@K!|7|HA=J2Ċ|nh? A{%I:kC@~LS9~J0pƸ ?ļCP7 ?!iI:(56_C-µ@Ezzچw 5'VG9mǃ5C.t&Pރ5` ߯EnyDvd- [?cm0|,Um9K6Vd_hdpu9WAkxj7ohM')=u.#߮HZkOzc3D#o KqiE_FtK ~|҈?C&lVhEȣa&qfBwaf!Si٘}i|YC2;,WҖHJC292BL &2Pؘ]1ksp(~^4fvA15(~JՋ1ٍ ,,\O#a׍֞}@jW3x\705rHoL=MȒhBa΁0ә0-3 h|worlO#y.{dU~G=} v ﯵ4~V~d#M 1@؃ALQ#Eb#gVtŰO#}V]H:Cɾ>@{@J蟾CHա`ɐGd;Ce}=2c>>->. ?QYFAϨOT:V*tt&mz_brhy\i-x7D$yC/Ki\ysDNO@^K }J#R[: W(KK{s[Bjѯ7"cDFȕƘ!,PUx=+r?_oQS?<]FcT8@:7pH.MH~yh9!:w @쿬6cyD]VɩC虠`O#_n?9:T4X>:@y&BAZ!0L1yI`\0&$`\vTv4k] w47CwBK 4\8GwqS:M{#1=X e%Rf)q[~r16HAofbVR!?CFK6im/ilCR QlA| SZ #-/gl%|DRP!~֌.9-p '@on֒{FZaPXUńh/E@{DoY\U2 $Ӄ.޺M,<{R'PWX685;[>1DK2{h2*{9VҦfLm\iVҠS)}HH^%!TTk=o:' 0$Bw=Lft&@do3uU4Ŭ1f;jchכ-\+3`13Ou_Fo!9"]ьg~{ve&ҡY/̬fЖV@c8FH<4"aڔ/jV@[Zvq[;>䝺KICl3?6꧲udyZ_( YAh72S @@#ȼuxBJbh.VFzIjHN 9R+(q.7ˍq6NGNUbqi*E9]7 EsPh7( ɚoCuj\yblC8c^9ZJ;n>ThbF4q.YQ--p&,|߶n4wp.ԏp91{\ov\W)˖];İI2w03:WGʉ6vΙVhG{@6#`%MUbgOnQv $B=byߐD `bS'u^u56+`hz1jʷcplVL|e~RqRl ż݋#YghҴk ?4TMǚ3$ͧēWj$bAԴ"?|_,uh=B?P/W+(CuK. + ?sȏw>$|#˴3G#"?31"?u,X J\ei|t ΅^ZM.#ti;%~X_;@[Uȉ=V={k@F zH)>S|p;G+Fq3/>7OZw` =)cx/-B6`do#F`xH~ oI8gC%}lcpmpqi08CE;x19 <A^,? 
v m[H55o5) p S%(TU9Y;K[A~e59m#m?"!q;_m7pƦN|@m{,Kx88(eVG>pN =7nh[l>=8w6n+Ƿ4m8[ 8~i2kss~hR%Kng4-;X[)~ZJRopXKQl>^h6=JqCp|wJֳ0g8HBeK"Yχ mI),6 Jjc9At_w*ÔQ%iZ0"2{O6L 9kٴX:-^L[Fgm5<%,P4;Ah~v o!Ahz=t8 IcO3iC@.s1eW^yz6 ch%p0vf:[(Q_+tm BwnxV$J,P1NmrPY6Vb,U2y e d S]b2?!o mS3Vml^&;&!k0Bd>MU 'tMO,G--Zփp!E8'4%?nk67f-c) pÁ6H֘ŘјA)Akn`V2;mOk$! ef|M}el:|-qC4mE6q8:D4I~Cw':BSZ]G&^]mL9 C&y<)Mۭ;f=)ANNk]&AٯWe桍{{JSYw?xh~ShhZYePoimgo$aY68췺z'~삃87 ,Fk(P HׅOcmc{ݤ(ăԍyd+z @yxS>JH! LvW;z%iGiD"Pm8ƴ ^5EM 2=(c6N35^fk5dJO)58D SJM6XJO,r [e95ۀ;tN[`!R({'ڔ6mzT:JOG陂Y=zЦ9$~=5qhS=uqxiBT Di]iJ|HWDNxn天<4(B*SJ”Hw+:HTZ CG.v&!)W,NAWo[W׬IƝ~UJY$JTGq1X҈Eh3R_5)5+rt'QSf[5O^jRlK[zl_g[!EĚT*VNtɑ9jM\ j+ˍ~2j(W+:^{\ P%4e'&4?D=Ŗ2Nyd;zQħmBЅنU ="7$Vy>]ix B{ &\GS 1mPIާsvBV !ͦ<31%ߎ27rVJO+tVGct%;D UB`8J[[P# SB [QxU4(%lK"TEn`:dENYXN5RJn9JˬҞ( fPnMfP;puFj4QqUK,} HHZl{`ZZ7!Ɯf8lzIi+J^^/P?L:߫Kp,&P嘪caB귚``Oh29BTA.V D]w99Nu^n6(isi䉶qy v/>ETc9-P'S_+$b̈́#04مJܧ]^aK.LTF1iMtR$ژyIrZ>Hck&( on ?Z!XCW(S寻vA =$GB6I&En!PŢh#1QPlR rE͂WǗ9K)!10ɚ~|W`D h\}KN<][n[I7$&9##ʷ EݺTz[q;'0'C5d ҥo`vbg*ID*%#Yu盚kёɛqgߝavGC64{{”[=ǾؗӅ,-^zp==8BMz%/TCۡ aJ&ɐt)܂Ջ/$DPd5"2"zE”ܳռ%#*L!J";,Ra  \AR]IQV YՑQ[̢x׫)G8%hjj>& dZHH"utJAj{)}H q2z|) .JBs6^!٢q8rk01`Ԃi>5JMiutv#A jH_U&9B4 /AjȃP6^AhB b;y!4\!" :½v"PGjG&,XnMj hҲӒikUYq>T oF}n^44ՎRsŕ$фHMYG\d.MIuؙˆ%MR=M%g /809yV,LTqAg C\0-YX2/nYEL}Weɯ4eM[X,@Aŝg \QCǽ$jְx+猟Z'kR4X9KUdɤhmPY*IBEWcyERIChK.:`#j hKSEɚ&D5 nml~m^yIf^~m&I#:k3K9%g^RfRT C^IO.ND(ٻ{4xR7չh댋;B6ŞL :NePY\X~r%$_+[ A[X GbEUT#n$NtdcRUt uͲ;$ )Lˤcnm}΢U8PFf\&˺eR[uJ?;3xXd6~Hg&`ೄeDedSQ`ܚݚYOI>\⡔u?6D*o*B+L0, ٥۸4X D RWly t v bPXJ3 O}0'@7}duU%ɒ[ Oa\t\awd[I݁._~M!"c'd'_P׶ʾ|zFG{c'ΐF~2׶8xF} 6EzRGvlᎲ6Q[PcsEN+w]u>'?XA/-d}~P"n6b?/l*-P16p]w!U{Ry=ağ!kݧ=嵮{OŗwM_- e'#kkyg8.F2H w_\>w`)*'F-~?y싟O}CO/ғ/~gx7|{?|Gş{޾Kmw߼?}'t7?'wO?o~ooo'?ݟƽJa8Лbt!2-nih}t}|XJ=Cce:7}ɽ ! 3r9aґp?1Hz:B( Gur^xdtӣ |zy>=?uOϧGA>?['F~b=dǯ~gos̴g*}Oa5A,Wg>3gytDBE=)Ў|wpQ$i޿ 8:^NoKe)Cm_>{Q[5z~N𝻷w^_ч=⣉T-o#7$Z]sN΀|yb;^ )/M$F}ˢ!A\GP>w^_pkwwpU_5?뇭}'ǫ<!'_E?g:W.G|j}ʦRۍogw SL+}{$H~OgʤhMQ'x~`}ko ';tV0~^Q{V&?ŸzUpN}zͼJMyL?lt廕Sl_{eD!W;pIP7{[s' )=[> u>>7.+1U?C`qg7h)'ߋ|&8Em~ &MBnwnNk?tdTӿE3|^:{ -1JRLgE![J qϿMWfE5nO!6i/~wxWs|xGLYjȫް'%ׯԺ@J´P^E&'Kv.lW#{{W{SDfx~׿z'/0֏ʪ{wv q}RonVxryѻwo)M?>[ퟜß?y׿zso9FZw_']~{rw= wF½Q̼ytru.:kUl3פW`촌G,mM^'nG @#-n*HW_*>l&`/Wbõ+}k0v~PowQK>>gh` a[nM7ۯE|=X& ^aX~Q!o_@_rZnI?vh/)I=' ٿQP9>M3 g/%vf!O-BqF|_^cux/[wID}vfb^CX >cWobo:ݛs{3XY\=^Gy|bٸ[@|pД1>ؑb͋Wxߧ:;6q7._|[{rQÓ̍tw6vϗޭwO!ǭ[jj֨a?oK?g[&~㽝t7"֨s!q11ԟrlSLO43MWDy6 >;Qt4%'ԺI"f_o5i=.+ϓQ~FW]}K٬F n}i爟^Og_7v^I%"|dMO+(fUDzaO N"m|xU}ntAoo Ёš䅍}>`p `]c]Ϝ.|?W c+^uH z޿&shѽ?÷= 9Dm ƊV 8VڶRG3*_9aO,9}AVچ4e1{qQĊ",j'Q$oy4ǸqNYϭnq\^2;; yonyQ\|WmQ~s Iqᅱ mɔ~'j/cK]sM~Z>>ϞUNIg?Qf/+Amo W3q)~? Z1E)$mww qTbrO_\?O \X?*_}x⩚ǔdnygf?ՋҌ?mCN> stream x[[oƑ~_^l$GE`FϹο87MS6ZW4Ɲ_ eW`o8`ٷ+ 3jmU)`@Yx]VkQMz6Fo/[&il~4uVERlU`Ucva!غΐxc\KO...~5_j62)J|mtQN@iG*]hFCZFŵ (+x}>I'j <`'+vhD"빥Q_ ѿ :c6!H⛦*h AMֶFꔵ'K:`?ƹ/96`&X 췷~_Vſ'_^q;āuUpY/zv7w [g5k]3b'5/Hq|DDD=v_!/W$z V6X}w)?\ߵ•e9.k ?'J~6as/k4[k)tX"q,d@iƽs%Qn8a#}]vq\^2Wy7Ѹmwy2H2}<vYD4x#%pa; Ie^_o4JFMf=?@cjk!"[SW̩>*G:WܦiC@Ĉ x\݊dڲ'|nLPp'A) 1}h6;8J6uUsU\D[CV W*?\Է1jtCSb5-qdXurS. suaPaV-WY}8/Ga z}U‚ @5+ʃ-k-X.d= g?&^d.~./XUZJ+>cƤǰdQFU@)xFz3N\Q'M!#DuKԌ+N! q'^ECDzg KySFMš#<&vXe LE-~We1IUPQɷMA!=fҰ rbPD!]z6r U^&A.|=` 7#"=I$3h$o{@we6`QK lpHB:"|^9ìnDYRn@<> QTwPXUNN͜ dC*yo+I'M9Hv8P$B<3xÀJD3 ص"al-s@r;S ԎaNQnmS;]y)bD{+Y]a*kr!|L8bf/2HRQF-B'M)3!7m_ѐ  + ث|lfk8f$"jBquw!~~ś;lu: A I5eȑ{ȖKCcY!ڀ獛j;Hdz⋿dࠂʍi=Y7 ͡-"}u ے.^MYnV+pfcmHFKwKȻN61w8ѬOk6ۼf}BtAzdZg:<2`࿌P%TX|ضQz#z oam8H@ΙL6&v6SFtPx܇Ubx8=ӨD\`ݵ/7w}? 
NqZ4p+XŢI㘷Y!Ožx'u!cz92.qWUݿcOǪI2G{U">md٧kZq*\ ri9p /52BD)aKlT@\')IVN9$ UC$f(PZ*Մ4\4'Sf!z-m7yUcPƨKuM!k%ԉh!7+assMl{P*E~=\ ]y 5wB:AA_ ^zaxLQ+6+r1LdUܷԓm+s-@Uwѝd#)]Gi"Fafyn b!aZ}<iOɌ5xH{ތ% l.:оJ8SǙa2phe/,-G6 Dҹ%B`zX;ioeAN p| Tg vnb:[(/v,kka?V NbQQE {T6+lӱ[cnЁ@L~a|Q$L•"aȄ .$Lϝ24/mNEōj $ &"qpBŠcG|9x-NJkNJ7(̙ XIcB^xt9-~XOɔq銔ĒJ'vL( _J?ݳu]fO0e-wQ{1RyJ լU'[*n$iE')CkrIYt\ه@OG y?;t~@ P?P:ya'@|X^G6+S$KdܠEpmcdIn#WOui`L\*r\:n}ňlJn%3jU~t(_OymԤQ}Dm$\O=Oޏ~8eb+3s+Vx])Dd.ͪds8TdWy)|8uq }1L?ĥ,oJaMTs ٲ~sEF|}\HX>H 9_G-\6Fv&|'S@䖺DݽXF 4gwDڰ5L~zD8.93P y'ńpΐU<}:!]7wUt, |~-t?ю{pN0z:}4wQݽX(7NW7]NPnLAzEL*GUS0މG6xYwPX-u^oV}/meE#$LBN*"];H uf:914 QL]NWPw/!"ő(?_NBj*΅endstream endobj 356 0 obj << /Filter /FlateDecode /Length 2130 >> stream xXmܶb$(7UWIIqυ p5hVwuCgHI$u:AAGry8/ 22ƿvaAwׯ(3v ]2!#KLxG^XFZ{c kk5/k/tt.44K8`/8:,}w`ɏ쓮䈧N1B])tW8Lt4A L~>}"\(!5E7+8w}MeW! po!,CXӈS`8ƱQXE1l7$ vCdUU}6üqA*tOFz4_ ^H(Q# g/hX{T΀IN 8Z#@2qHQũ0`8FL#AR%09!NiOP{t:1dҫi6 l7"/e\J7T>T y:q2T*!1ۓvyF9X[AcIEaD{oq B&)fʹEa5zivB0vfFK=SP΃;\x&r@ګeWX\B!'ʻJ}qE-*ܵts\m_!ҝع9چ@m{)v)̕λq%-splw[VmQ !8f,Jt ڮ8V _a8x%ba؞>i{w%ZCoo-ٗ So_%D(BYy{1KQ^o/D.!A9<1ؒ*|wD"&`U0|+&$=b^2 (Gk" &RR4pW"},3 8D=Q ]' /CHD,+݊)HQ!Fk$a߫2""J,?zHT?J{ &[`K #ھ׊؄ ?>un NɡkBJd0L)3i,;kjEx- ʝ_=[U&y)FEǑOѫIXdȁi@b@)̬@V.gdW5ggdQ 63rP.@P&I2S"9i}_@41- Ů;USM_&7BPW W'E$$xAsA39[W^qi'f&c ՃA.K*E'\1!R**Ş^l)0zQaOPb+;sA`a 81p'c e0A 1fKGtn<ѱO3@4ƀk9t6&'ũ`> :4IQ]*uZ$9Q2v\6' H{%I{?Oނvy> stream xZ[o[~_hx؆'{Cn \$ KP"Qq ffwϮLHa"{7ߜ=?ԬW/g󳏾9W/d^%>٤' nDŽ˳紐3Zۈ/k ~t }I:4]_.k͜>ʒMo&9#Li^N.N.o1?N_=t;}g4ų {:7&cÙ\zv?~y*V-sw})}y:ի{ais+sĢY͙`b3i ܩ5Q9U*[Dd &YcF(zHH 4e g{&93{.GJ䋹Rʳ:^+"= D{C' "Q<5%[b ٖ%=ȵ$DcX9 CzrՆS#D$ ԉ؀auHdk <#ԉc4,_m-t!f!!4+ $x'z` $QDtBV >UC+BHrd"K5u2pD)c&dO#mdMĄ!ވ,HR7JCK pi\ >L,g@C7Dٍ@!@ E`GmY@rC%"PTG;" A2@:dm؞v tqIiD$B#, @Lu(^r" \SHiR r=ҖE( X?IL,\%H1pA*7 B$!B1A!<@kjSH^q $SpH]A&˲% т%֐WH.!0/Kd jH,!eLIH\SI\)I4 ("yw II( L:VhCcߡ$6* P 0QN^P'8\ԩpIe;RPMENK% C !Jsժ VW-Rja U>9#>uk$V n%9ʰØR2j|IV|hc l@0]X$'kc VMR,z"CtT !*d"@LT&##V ]x v")x # W ae+[$LA!`@lSWPVbJ"J^ɲyJYy>$n, S4bi UYV2v]- '1 a,4\ڇrZ %  Gi 1Iwf ]u@kW.sB,ɲtWլ`Fbң9y!sȼ4_Oh :~oWs0~Uփryh|OJ~&cQOĵҷZ}~'λTreYbZzK" XGC6q;?U K秨  @-C FY]`mSukrlxAP[`Ʌ7T8RODac9j SƒR+.Y_PCՀޞc<(Z<T+ZM<1HWKhU\$ľV.XU8z-n8:Q+h 5}D+1!Z#y_ѣǮE?ERxAYC=I[=pT'25IخdfםMTYUˠE * j6 =E1_yC&CVOA>UZnwP,@2ltyu~i䆦@U@!JxnQ`xrwd^=ŊCHz*w)!N88dO72q7mw9qm:ޗ9>N|EeŅ_A.woɉ۝p"m4 `_4{ku*^ i?S6 Æ*h~z7_>zK^?ʕe:Z曰6EU lڷ-D ]i(3őִ~ޅ :6p}j{ؖiގtre'|y?> 9(ӏţ1/ c=m#Tdj;}чaQK4ݜɃ5wÄvzzo[LY>]oy`44`F:eon !2)r?*ۿ=*><-g&C5OL;Bzc6ut}CY)NY~txw|/!?ՑKo~p<`yUB;YR&Zow7f]#8`݉pWtŚR x@;:H$*68D- :Wt_Cf9G^g -u:0M^_LYW^L ԃ_yhḽX b!OOQ7%ƝBzAߞ}ߴd9)}}O.Sӓ vdyI޳XSfoZH5E].Er%Sg̋Ru;enh 9?9}?^LEaI.(ۮV6r$0L; emYiyTqN!b7ۭ=zStRe/q}M{S"d16E`PΟ7kG*()o!7ՒP !.bla\*.`]%Lo: D$VE*; G!ńh;H#6- -᜴_AKUgzDSҢJqb@ODL331\oV;{:f|p$NT:gDIA ?ҹ_^E0e4}یu,/e.F(l흶e6wC}迏z/qBMXR>5jNϥx,m.DMwDnX>`dHWtjRZd`DV^M?k/=UzֿkjǨߜ9AJendstream endobj 358 0 obj << /Filter /FlateDecode /Length 3359 >> stream xZ[o~_h (=AES>AErco̐4'W9 EqyBfwŗk o6"L,m^^.VWΰXca񧕂b*6% W[gEY񡒕aZk6XuHj#VmJ U)EZ*0_R|wMTpĽq|6/VkUOٞf4[<6jXǫT~J+$2 S~Zrr4s{ تd#|` 2k؍jmD9+69W(u8}r>xQhX~MK;KQa?Ya-@h|}>ld| $DKlؤMMnTD DwTdp"bSsls7oWoe8'_kf[8 6bh0a&X*=Az&lHbߕU1*lrD3&rF}fjQ p(kd|xAu ,E*PmЧX)qc2Wpn K71fwv\o>w[bJ~cr )9F&D/J9>xJ@8mrM(a|{ ?z)j Z'a+m­w~Al%@R]ߠ[h9cK%2Ep#qC#NqVH"$Pr~WӂL;MqPUPhYhe#J0\Me))dS,iʿ10/w#RV>Oͮn)+YOK ,X$mz$+v$%uDk~,&5o~c Vd690%z1S^Hm#*}OӸ6!3&=Dp|;LjxFnMUh{.u4,FIsمB˶5^6*n,kmOvR~tpUhq,ޥhC"7 ƑRyZZE<6|X+/ְKy}(p-zG9 YFbÜJTO8Uw l(Z譩^butM)uUGSg!^B(M迂f)䩫"ܲݫZ_~grE| dYJbA %,V,!#Fhs_ '|;e{My$uH%%wi'b8HPbhSۃHj^Xȋ>xhw9/(dP~&ze]N{H1(X%LjQ8k\SfD'VבCexۓWƇ)юJIwPTuކ6Hh_uiClWg5ǻ ?<#̮)):%5 *Pt&SCdҘe{eH1X!Ӯ!Mxϙ!{'xpO y>VR 
o])Rې}UTse)(m٭76x|c޵6O3uQmS-l_dmz^id}JuԶ!dZ_%A+YĀ H!̘\El,!ĢPc(TFfA,xSʢgcTeGʓ*ު-HC@r 팙:܄{;AҀw\8=&aw %t~'!L|\A +遼ysrCP46/WbC7 |aX* m4bц.5?Z(qȅdXiHf tP}~Darݾ (^CH%O %!a{jK%S|؊w>)5v,_R;lh 7#f ିEvˑfZ\]ayendstream endobj 359 0 obj << /Filter /FlateDecode /Length 6268 >> stream x\Isȕ/ۀ&=&Zᰛ>&& E-^UT r}-^T_Uuq꟯"awWn lF]\|]E. ecU_qՔ:pys6*+g4(>]: h?‡Zվ.n+ctXW!-\j_6M1ZP_SPthC5 ootpyk'پ6.rӪ)}φwK"TN_\W0YDZLPbk/ Hۘ ^' آ;qnm۞ p +[ .){:9)>u06 L$vYj+N& 8½#T#|;+*ovkDTS߂d̘i7ߝ-8AU/p[u忒F'YCmR igi avsӼfN =5pA>ek!uCjPeվT42p5aVhn+YJXf]ov)6ea9uKKgF4# '>&!kz Kq4$>NVL;!⭕|LH(?\I!NĘ-â;,^5s`OHe`67a̡9.fhd$(fN0Ѧ][/FIaUg LT * jPUh2X9/1*"k =})\(ufHSNKnj4Vp;%hE8 'VT1 x )MFDfA7"PY䝖pۂQ:Pas]PFݨ `^:_X!?6?OFѕqD'b3Ϭjlj5,ؗ(kx_1OEr*t^2))4 aAvX,^@c^u X:&}LE58N- Jjfk8i|+QjxSRy .MC48ydt #*X;qj)\Cvy9t=1#5+"(-.~^आxL3hظcntqv6gbn{xOe|f#p [Ddow,+4kbLfwICD:;4Rjk e/#W+_y]5 4W_b^i?c# ƒI6m'L T>~CUW12 :S_jʅk62q=^L$J0E䟩) <,?a3o'i j&! KAAoBT9saLb*G NR4mN$MgU٥T]=nx4'BB,>%Xn1+=[v)pjn h8gbB6y@`io\T='죡`uF;mypZXlr >;9 ER]s:P.D;{o(\*QP&-?k%'/XzK&œ5Ebd/CЂTY_wWe[xo\;|3^j&{֋pq U딶X|aw^i:|h2J;="٧d&X/*6t熮Q$V*`'}2ZU (Dd@!y/['۵ =ۛهȘ-O4>bwa$VMot~Ka=}+ןf~Z Y2_Ӄm ㍊ ǘBvӯBTxr"!U,L2|QrڿYWN5m2 9֑JR˙)um(e, ˞}dЀ3^rWjܣuz)j|#^/qيq35kȇc+!SSZ I^5G#"n᚜qy+ ĕ_Q7|߅;$-i_1:ˆ#YUzQ[p/~KX>-PD>6E4Y|k :6g|jny;C :~SWU`ADBkCq8fZm:6 }o=̍nA#_MHendstream endobj 360 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 382 >> stream xcd`ab`dd N+64uIf!CO/VY~'Y3zxyyX}'=J{8fFʢ#c]] iTध_^竧_TSHJHISOSIP v Vp Cq`R} }'}?}{CgU|./.SqoNz߹qr[؄7yŞ>~nSg>;3wߛgO^-ymԆ-w| 2wn9.p{n&Nnt I+endstream endobj 361 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 713 >> stream x]_HSqw538һ+KMM?mfخ9LTJҮ̲ 1#sj3HB\]zp890 8uZC+(uA,1c}¡Cn|BX(rbJĥD͈\ڃUdJ4Ք닊+X<.*GBM 'HE"-t4qdaM=a2ydi!a*$LMZC3UB>JRA(e"%C2!:jS``P=Ah܉--@N ;#Vd\m]$Ό ۘ>5~ Zy.Kj JZX?0)+0ax XfYDgW d`D[I˭-S ^SWe:s $8?a ^$h` UM뫍57)7M5}f4 b1iZdUf?4my |P1n }KtRL8 B*iA+2Suax!CB},3?/ӛayvcjTz> stream xXKϯJF ݽf06^>+qFEQKR#O琪~\aD_U}Uo˂ޖoڛ uooÿM{*o [Zz[-IK]X.oW{vLQrIuZE)%˝{VCfXra"_;[p֋%>jl\| XynӪP\U vsB%QʭNm[OkrߌoB3e풉ˆu6d()'iZ#Ț406B#( Q]ߞՋ)eO?=n_=Oz^ lP]PÂ6xj6HrnM9>VJC?Vc Ze<(wU{CHovUsJ+ɹf/&|`\DŽs[㮉QB?wH {ȱG r{f0COo^|Ӎ3C6]{3lW.@}mn(&X^Oa1b,#͐Ȣ?mkoW[W_jWR`Ѭ5[awaI}bHAwuZvFl@aS m\o}>Q2c27tM{>l stuфFȿsݑ!I%ad1߆oB KAphp4|ޚ=-?)L 7 Vj}ugoC2)nphK(ƁΧCr7+̱,ƅC'炷 o~p)Dlm ˸tQ> d`LSklV959JAa;0ҀL'ghȆYXpLOP*=-|snK |Xm~r}tpйB\U̩ PbeOsrS!3A;!LEC{02 ؕTLBX?Cku3rF&hvŔbjhGO 3eJ>K;8@߻jyac%Nܓ<2`nJ M8i)PD3hv.N\g l1MF(rEh 4;Z~4} <ę,pc #c#}C0Q`??ұ2a ¼ Sv>CD0D޻hM}U+To> stream x[KsF/$q !{&{Vd'vSeJ=3 A"%o[:ُ~/Nҿk)IK''b>q|b-ԓbz+464.}'o7S=bZߦZSYu=>8vΤW=RSa ưh!,~))/K@ -;fyH{ ;^NgGɞh:Uۛ^u7'/Li[e4i"|e/wԅvg:âR>Mތrdf1]Tz:mKɱ';>:>/hߝ|vȞEbb,t)DLo "/MK5hoV$Y..Q];-y be+ĞۂkXɏ'y~b?uUnI4[59e^\y&&k-UQ'/F#,KpD/^ϿP'kExP 0 X3p;)L^)x%``Cipɡj On$K *U7U=اҭތj4X5om؜廬G_sYN`n[F0BOCwp1NP3e!Y]˷gZj2[9~MK6v-oFM2>Um7}C)@=S?þDFIϖjEZ69yrCZ>c 5bEǻ`scv\.UC&KDH:UK]:B"T v?Vzo)!`9Aq Ypc\Tjj%/$D Q}8?@ H$n(l":̀w`T/,f3R  @ [.3l< j% %$Z_nQ\ޮȻQt[dԡ 6]IOw^n4]ոŕƇ!HࣲҰp91ǘ9c{>eGI6d8^+jXk)r$06K`bQ2cBxyfRQ@v^r3>)fF 1bS=)6<'4'#w"2N'ϊ/)=)viԥlB$D:͢nHL޳`0(JC\hв''mCz< EȢj}/5w`{8XiڬW-.?.d.;<^]ZU) ׀],JLG2 B};b7Tym(+mE”ԎoU u嘓i7 jlٴtcJ[}weeP VZ%X[7Tjnb}55cqJLᐖDn"PGlsN;- -BOϻ>#bQtDR|l.AD՟mo)EBHIR4BqD-w܃lmKܢ7VWyu5+6Ͳ" C_B꫘j[XvffqsUm sb]ign!}k0G=d#k^Ro+Tyʧy.64pEyax[birQgԏ>q *<*yrs\4U-p",1aKՋ[[r8JgR)= PcVzj<$)gqB{?T">b%J!ѻsYG T4|̺SI ,dfX-I~6]{'{k.J|ƾ GuTڡEflG7Ubu۝WƢ{#ٷfKmq0Ե'-cXܾǤu)x~QT<$ b^s  Z"ĵk뇴2U⇖~dYvhY 0ni]FFk$k+!^2=px v ۙ{!ԑ4J% B!OyHo .3n<93JKVח͐zuzQr!%g/jij?yL ;1]8>~U?!u*`~v?l%3Fc($55IMhj5_j 懸]䵆vkGCї_v%A 볊/z(ٲG>(Roho܌t)r>jcƓG32e(Q"K`?$~wi "xJzx*7^u۝-Uy t][}х ^ρzts ,Í݌]t| Վ=Ҹ+FmF|?-(?h ܷ#Bx%@ga)3/]Ҍ/rx&jx%SƱ:ՙS<[r>3eaL*3LGZj^b*^,pz^Q 
brms/inst/doc/brms_distreg.Rmd0000644000175000017500000002561714010776135016240 0ustar nileshnilesh---
title: "Estimating Distributional Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Distributional Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

This vignette provides an introduction on how to fit distributional regression models with **brms**. We use the term *distributional model* to refer to a model in which we can specify predictor terms for all parameters of the assumed response distribution. In the vast majority of regression model implementations, only the location parameter (usually the mean) of the response distribution depends on the predictors and corresponding regression parameters. Other parameters (e.g., scale or shape parameters) are estimated as auxiliary parameters assuming them to be constant across observations. This assumption is so common that most researchers applying regression models are often (in my experience) not aware of the possibility of relaxing it. This is understandable insofar as relaxing this assumption drastically increases model complexity and thus makes models hard to fit. Fortunately, **brms** uses **Stan** on the backend, which is an incredibly flexible and powerful tool for estimating Bayesian models so that model complexity is much less of an issue.

Suppose we have a normally distributed response variable. Then, in basic linear regression, we specify a predictor term $\eta_{\mu}$ for the mean parameter $\mu$ of the normal distribution. The second parameter of the normal distribution -- the residual standard deviation $\sigma$ -- is assumed to be constant across observations. We estimate $\sigma$ but do not try to *predict* it. In a distributional model, however, we do exactly this by specifying a predictor term $\eta_{\sigma}$ for $\sigma$ in addition to the predictor term $\eta_{\mu}$. Ignoring group-level effects for the moment, the linear predictor of a parameter $\theta$ for observation $n$ has the form

$$\eta_{\theta n} = \sum_{i = 1}^{K_{\theta}} b_{\theta i} x_{\theta i n}$$

where $x_{\theta i n}$ denotes the value of the $i$th predictor of parameter $\theta$ for observation $n$ and $b_{\theta i}$ is the $i$th regression coefficient of parameter $\theta$.
A distributional normal model with response variable $y$ can then be written as $$y_n \sim \mathcal{N}\left(\eta_{\mu n}, \, \exp(\eta_{\sigma n}) \right)$$ We used the exponential function around $\eta_{\sigma}$ to reflect that $\sigma$ constitutes a standard deviation and thus only takes on positive values, while a linear predictor can be any real number. ## A simple distributional model Unequal variance models are possibly the most simple, but nevertheless very important application of distributional models. Suppose we have two groups of patients: One group receives a treatment (e.g., an antidepressive drug) and another group receives placebo. Since the treatment may not work equally well for all patients, the symptom variance of the treatment group may be larger than the symptom variance of the placebo group after some weeks of treatment. For simplicity, assume that we only investigate the post-treatment values. ```{r} group <- rep(c("treat", "placebo"), each = 30) symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1)) dat1 <- data.frame(group, symptom_post) head(dat1) ``` The following model estimates the effect of `group` on both the mean and the residual standard deviation of the normal response distribution. ```{r, results='hide'} fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), data = dat1, family = gaussian()) ``` Useful summary statistics and plots can be obtained via ```{r, results='hide'} summary(fit1) plot(fit1, N = 2, ask = FALSE) plot(conditional_effects(fit1), points = TRUE) ``` The population-level effect `sigma_grouptreat`, which is the contrast of the two residual standard deviations on the log-scale, reveals that the variances of both groups are indeed different. This impression is confirmed when looking at the `conditional_effects` of `group`. Going one step further, we can compute the residual standard deviations on the original scale using the `hypothesis` method. ```{r} hyp <- c("exp(sigma_Intercept) = 0", "exp(sigma_Intercept + sigma_grouptreat) = 0") hypothesis(fit1, hyp) ``` We may also directly compare them and plot the posterior distribution of their difference. ```{r} hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)" (hyp <- hypothesis(fit1, hyp)) plot(hyp, chars = NULL) ``` Indeed, the residual standard deviation of the treatment group seems to larger than that of the placebo group. Moreover the magnitude of this difference is pretty similar to what we expected due to the values we put into the data simulations. ## Zero-Inflated Models Another important application of the distributional regression framework are so called zero-inflated models. These models are helpful whenever there are more zeros in the response variable than one would naturally expect. For example, if one seeks to predict the number of cigarettes people smoke per day and also includes non-smokers, there will be a huge amount of zeros which, when not modeled appropriately, can seriously distort parameter estimates. Here, we consider an example dealing with the number of fish caught by various groups of people. On the UCLA website (\url{https://stats.idre.ucla.edu/stata/dae/zero-inflated-poisson-regression}), the data are described as follows: "The state wildlife biologists want to model how many fish are being caught by fishermen at a state park. Visitors are asked how long they stayed, how many people were in the group, were there children in the group and how many fish were caught. 
Some visitors do not fish, but there is no data on whether a person fished or not. Some visitors who did fish did not catch any fish so there are excess zeros in the data because of the people that did not fish." ```{r} zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv") head(zinb) ``` As predictors we choose the number of people per group, the number of children, as well as whether the group consists of campers. Many groups may not even try catching any fish at all (thus leading to many zero responses) and so we fit a zero-inflated Poisson model to the data. For now, we assume a constant zero-inflation probability across observations. ```{r, results='hide'} fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson()) ``` Again, we summarize the results using the usual methods. ```{r} summary(fit_zinb1) plot(conditional_effects(fit_zinb1), ask = FALSE) ``` According to the parameter estimates, larger groups catch more fish, campers catch more fish than non-campers, and groups with more children catch less fish. The zero-inflation probability `zi` is pretty large with a mean of 41%. Please note that the probability of catching no fish is actually higher than 41%, but parts of this probability are already modeled by the Poisson distribution itself (hence the name zero-*inflation*). If you want to treat all zeros as originating from a separate process, you can use hurdle models instead (not shown here). Now, we try to additionally predict the zero-inflation probability by the number of children. The underlying reasoning is that we expect groups with more children to not even try catching fish. Most children are just terribly bad at waiting for hours until something happens. From a purely statistical perspective, zero-inflated (and hurdle) distributions are a mixture of two processes and predicting both parts of the model is natural and often very reasonable to make full use of the data. ```{r, results='hide'} fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) ``` ```{r} summary(fit_zinb2) plot(conditional_effects(fit_zinb2), ask = FALSE) ``` To transform the linear predictor of `zi` into a probability, **brms** applies the logit-link: $$logit(zi) = \log\left(\frac{zi}{1-zi}\right) = \eta_{zi}$$ The logit-link takes values within $[0, 1]$ and returns values on the real line. Thus, it allows the transition between probabilities and linear predictors. According to the model, trying to fish with children not only decreases the overall number fish caught (as implied by the Poisson part of the model) but also drastically increases your change of catching no fish at all (as implied by the zero-inflation part) most likely because groups with more children are not even trying. ## Additive Distributional Models In the examples so far, we did not have multilevel data and thus did not fully use the capabilities of the distributional regression framework of **brms**. In the example presented below, we will not only show how to deal with multilevel data in distributional models, but also how to incorporate smooth terms (i.e., splines) into the model. In many applications, we have no or only a very vague idea how the relationship between a predictor and the response looks like. A very flexible approach to tackle this problems is to use splines and let them figure out the form of the relationship. 
For illustration purposes, we simulate some data with the **mgcv** package, which is also used in **brms** to prepare smooth terms. ```{r} dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE) head(dat_smooth[, 1:6]) ``` The data contains the predictors `x0` to `x3` as well as the grouping factor `fac` indicating the nested structure of the data. We predict the response variable `y` using smooth terms of `x1` and `x2` and a varying intercept of `fac`. In addition, we assume the residual standard deviation `sigma` to vary by a smoothing term of `x0` and a varying intercept of `fac`. ```{r, results='hide'} fit_smooth1 <- brm( bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)), data = dat_smooth, family = gaussian(), chains = 2, control = list(adapt_delta = 0.95) ) ``` ```{r} summary(fit_smooth1) plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE) ``` This model is likely an overkill for the data at hand, but nicely demonstrates the ease with which one can specify complex models with **brms** and to fit them using **Stan** on the backend. brms/inst/doc/brms_nonlinear.html0000644000175000017500000070520014146742367017011 0ustar nileshnilesh Estimating Non-Linear Models with brms

Estimating Non-Linear Models with brms

Paul Bürkner

2021-11-22

Introduction

This vignette provides an introduction on how to fit non-linear multilevel models with brms. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models. Ignoring group-level effects for the moment, the predictor term \(\eta_n\) of a generalized linear model for observation \(n\) can be written as follows:

\[\eta_n = \sum_{i = 1}^K b_i x_{ni}\]

where \(b_i\) is the regression coefficient of predictor \(i\) and \(x_{ni}\) is the data of predictor \(i\) for observation \(n\). This also comprises interaction terms and various other data transformations. However, the structure of \(\eta_n\) is always linear in the sense that the regression coefficients \(b_i\) are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term

\[\eta_n = b_1 \exp(b_2 x_n)\]

would not be a linear predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call non-linear models. Note that the term ‘non-linear’ does not say anything about the assumed distribution of the response variable. In particular it does not mean ‘not normally distributed’ as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in brms see vignette("brms_families")).

A Simple Non-Linear Model

We begin with a simple example using simulated data.

b <- c(2, 0.75)
x <- rnorm(100)
y <- rnorm(100, mean = b[1] * exp(b[2] * x))
dat1 <- data.frame(x, y)

As stated above, we cannot use a generalized linear model to estimate \(b\), so we go ahead and specify a non-linear model.

prior1 <- prior(normal(1, 2), nlpar = "b1") +
  prior(normal(0, 2), nlpar = "b2")
fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE),
            data = dat1, prior = prior1)

When looking at the above code, the first thing that becomes obvious is that we changed the formula syntax to display the non-linear formula including predictors (i.e., x) and parameters (i.e., b1 and b2) wrapped in a call to bf. This stands in contrast to classical R formulas, where only predictors are given and parameters are implicit. The argument b1 + b2 ~ 1 serves two purposes. First, it tells brms which variables in the formula are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict b1 and b2 and thus we just fit intercepts that represent our estimates of \(b_1\) and \(b_2\) in the model equation above. The formula b1 + b2 ~ 1 is a short form of b1 ~ 1, b2 ~ 1 that can be used if multiple non-linear parameters share the same formula. Setting nl = TRUE tells brms that the formula should be treated as non-linear.
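
To make this equivalence explicit, the two bf() calls below specify the same non-linear formula (this is merely a restatement of the shorthand described above):

# shorthand: both non-linear parameters share one intercept-only formula
bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE)
# expanded form: one formula per non-linear parameter
bf(y ~ b1 * exp(b2 * x), b1 ~ 1, b2 ~ 1, nl = TRUE)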

In contrast to generalized linear models, priors on population-level parameters (i.e., ‘fixed effects’) are often mandatory to identify a non-linear model. Thus, brms requires the user to explicitly specify these priors. In the present example, we used a normal(1, 2) prior on (the population-level intercept of) b1, while we used a normal(0, 2) prior on (the population-level intercept of) b2. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think of appropriate priors. Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem and one solution is to set stronger (i.e., more narrow) priors.
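
As a sketch of how one might proceed in practice (assuming the dat1 data simulated above), get_prior() lists the parameter classes of a non-linear formula that can receive priors, and a narrower prior can then be set for a parameter whose chains disagree:

# list all parameters of the non-linear model that accept priors
get_prior(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE),
          data = dat1, family = gaussian())
# example of a narrower (more informative) prior on b2
prior(normal(0, 0.5), nlpar = "b2")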

To obtain summaries of the fitted model, we apply

summary(fit1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: y ~ b1 * exp(b2 * x) 
         b1 ~ 1
         b2 ~ 1
   Data: dat1 (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
b1_Intercept     2.04      0.13     1.78     2.30 1.00     1678     1777
b2_Intercept     0.74      0.04     0.66     0.83 1.00     1650     1934

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     1.15      0.08     1.00     1.32 1.00     2288     2211

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit1)

plot(conditional_effects(fit1), points = TRUE)

The summary method reveals that we were able to recover the true parameter values pretty nicely. According to the plot method, our MCMC chains have converged well and to the same posterior. The conditional_effects method visualizes the model-implied (non-linear) regression line.
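
If the posterior draws of \(b_1\) and \(b_2\) are needed directly, for instance to compute derived quantities, they can be extracted as sketched below (the draw names carry a b_ prefix in addition to the names shown in the summary):

# posterior summaries of the non-linear coefficients
fixef(fit1)
# full posterior draws, e.g., for a derived quantity such as b1 * b2
draws <- as_draws_df(fit1)
quantile(draws$b_b1_Intercept * draws$b_b2_Intercept, probs = c(0.025, 0.5, 0.975))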

We might be also interested in comparing our non-linear model to a classical linear model.

fit2 <- brm(y ~ x, data = dat1)
summary(fit2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: y ~ x 
   Data: dat1 (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     2.76      0.15     2.48     3.05 1.00     3348     2515
x             1.92      0.14     1.65     2.20 1.00     3921     2910

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     1.50      0.11     1.30     1.72 1.00     4319     2719

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the bayesplot package on the backend.

pp_check(fit1)

pp_check(fit2)
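
Other check types provided by bayesplot can be selected via the type argument, for example an overlay of empirical cumulative distribution functions:

pp_check(fit1, type = "ecdf_overlay")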

We can also easily compare model fit using leave-one-out cross-validation.

loo(fit1, fit2)
Output of model 'fit1':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -156.7  6.4
p_loo         2.7  0.4
looic       313.5 12.8
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -183.6  7.8
p_loo         3.6  1.0
looic       367.3 15.5
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit1   0.0       0.0  
fit2 -26.9       7.8  

Since smaller LOOIC values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model.
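
A compact way to express the same comparison (a sketch using two further brms helpers) is to tabulate the ELPD differences and approximate model weights:

# ELPD difference table and (stacking) model weights
loo_compare(loo(fit1), loo(fit2))
model_weights(fit1, fit2)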

A Real-World Non-Linear model

On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originating from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows:

\[cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)\] \[\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)\]

The cumulative insurance payments \(cum\) will grow over time, and we model this dependency using the variable \(dev\). Further, \(ult_{AY}\) is the (to be estimated) ultimate loss of each accident year. It constitutes a non-linear parameter in our framework along with the parameters \(\theta\) and \(\omega\), which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with brms.
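
To build intuition for this growth curve before fitting it, the following small sketch plots the function above for illustrative (not estimated) parameter values:

# Weibull-type growth curve from the formula above; the values of
# ult, theta, and omega below are placeholders for illustration only
growth <- function(dev, ult, theta, omega) {
  ult * (1 - exp(-(dev / theta)^omega))
}
dev <- seq(0, 120, by = 1)
plot(dev, growth(dev, ult = 5000, theta = 45, omega = 1.3),
     type = "l", xlab = "dev", ylab = "expected cumulative loss")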

data(loss)
head(loss)
    AY dev      cum premium
1 1991   6  357.848   10000
2 1991  18 1124.788   10000
3 1991  30 1735.330   10000
4 1991  42 2182.708   10000
5 1991  54 2745.596   10000
6 1991  66 3319.994   10000

and translate the proposed model into a non-linear brms model.

fit_loss <- brm(
  bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)),
     ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, 
     nl = TRUE),
  data = loss, family = gaussian(),
  prior = c(
    prior(normal(5000, 1000), nlpar = "ult"),
    prior(normal(1, 2), nlpar = "omega"),
    prior(normal(45, 10), nlpar = "theta")
  ),
  control = list(adapt_delta = 0.9)
)
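
If you are unsure which priors a non-linear formula requires, you can list all parameters for which priors can (or must) be specified before fitting. This is just a quick check using the model specification from above.

get_prior(
  bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)),
     ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1,
     nl = TRUE),
  data = loss, family = gaussian()
)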

We estimate a group-level effect of accident year (variable AY) for the ultimate loss ult. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which, in the case of ult, contains only a varying intercept over accident year. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well-known methods.

summary(fit_loss)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: cum ~ ult * (1 - exp(-(dev/theta)^omega)) 
         ult ~ 1 + (1 | AY)
         omega ~ 1
         theta ~ 1
   Data: loss (Number of observations: 55) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~AY (Number of levels: 10) 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(ult_Intercept)   753.14    228.84   440.06  1351.81 1.00     1144     1832

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
ult_Intercept    5306.00    289.73  4751.68  5907.33 1.00     1136     1603
omega_Intercept     1.33      0.05     1.24     1.43 1.00     2407     2385
theta_Intercept    46.26      2.15    42.52    51.00 1.00     2382     2039

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma   140.01     15.62   113.70   175.42 1.00     2923     2557

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit_loss, N = 3, ask = FALSE)

conditional_effects(fit_loss)

Next, we show marginal effects separately for each year.

conditions <- data.frame(AY = unique(loss$AY))
rownames(conditions) <- unique(loss$AY)
me_loss <- conditional_effects(
  fit_loss, conditions = conditions, 
  re_formula = NULL, method = "predict"
)
plot(me_loss, ncol = 5, points = TRUE)

It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020).
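
If you want to look at the estimated ultimate losses per accident year directly, one option is to extract the year-specific coefficients from the fitted model. The code below is a small sketch based on the fit_loss object from above.

# posterior summaries of ult (population-level intercept plus varying
# intercept) for each accident year
coef(fit_loss)$AY[, , "ult_Intercept"]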

Advanced Item-Response Models

As a third example, we want to show how to fit more advanced item-response models using the non-linear model framework of brms. For simplicity, suppose we have a single forced-choice item with three alternatives of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Persons are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation.

inv_logit <- function(x) 1 / (1 + exp(-x))
ability <- rnorm(300)
p <- 0.33 + 0.67 * inv_logit(ability)
answer <- ifelse(runif(300, 0, 1) < p, 1, 0)
dat_ir <- data.frame(ability, answer)

The most basic item-response model is equivalent to a simple logistic regression model.

fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli())

However, this model completely ignores the guessing probability and will thus likely lead to biased estimates and predictions.

summary(fit_ir1)
 Family: bernoulli 
  Links: mu = logit 
Formula: answer ~ ability 
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     0.99      0.14     0.73     1.26 1.00     2924     2922
ability       0.73      0.13     0.47     0.99 1.00     2893     2508

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_ir1), points = TRUE)
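
One way to see the problem is to look at predicted success probabilities for persons with very low ability: in the simple logistic model they can drop below the 33% guessing chance. The following lines are a quick sketch; the chosen ability values are arbitrary.

newd <- data.frame(ability = c(-3, -1, 0, 1, 3))
fitted(fit_ir1, newdata = newd)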

A more sophisticated approach incorporating the guessing probability looks as follows:

fit_ir2 <- brm(
  bf(answer ~ 0.33 + 0.67 * inv_logit(eta),
     eta ~ ability, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"), 
  prior = prior(normal(0, 5), nlpar = "eta")
)

It is very important to set the link function of the bernoulli family to identity; otherwise, we would apply two link functions. This is because our non-linear predictor term already contains the desired link function (0.33 + 0.67 * inv_logit), but the bernoulli family would apply the default logit link on top of it. This would of course lead to strange and uninterpretable results. Thus, please make sure to set the link function to identity whenever your non-linear predictor term already contains the desired link function.
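
To see what would go wrong otherwise, here is a small numerical illustration (the value of eta is arbitrary): with the default logit link, the already transformed probability would be squashed through the logistic function a second time.

eta <- 2
0.33 + 0.67 * inv_logit(eta)             # intended success probability (about 0.92)
inv_logit(0.33 + 0.67 * inv_logit(eta))  # with an additional logit link (about 0.72)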

summary(fit_ir2)
 Family: bernoulli 
  Links: mu = identity 
Formula: answer ~ 0.33 + 0.67 * inv_logit(eta) 
         eta ~ ability
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
eta_Intercept     0.24      0.20    -0.19     0.61 1.00     2421     2251
eta_ability       1.18      0.27     0.71     1.77 1.00     2282     2094

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(fit_ir2), points = TRUE)

Comparing model fit via leave-one-out cross-validation

loo(fit_ir1, fit_ir2)
Output of model 'fit_ir1':

Computed from 4000 by 300 log-likelihood matrix

         Estimate   SE
elpd_loo   -165.6  8.3
p_loo         2.0  0.2
looic       331.2 16.5
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit_ir2':

Computed from 4000 by 300 log-likelihood matrix

         Estimate   SE
elpd_loo   -164.5  8.5
p_loo         2.4  0.3
looic       329.1 17.0
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Model comparisons:
        elpd_diff se_diff
fit_ir2  0.0       0.0   
fit_ir1 -1.1       1.5   

shows that both models fit the data about equally well, but remember that predictions of the first model might still be misleading, as they may well be below the guessing probability for low ability values. Now, suppose that we don't know the guessing probability and want to estimate it from the data. This can easily be done by changing the previous model just a bit.

fit_ir3 <- brm(
  bf(answer ~ guess + (1 - guess) * inv_logit(eta), 
    eta ~ 0 + ability, guess ~ 1, nl = TRUE),
  data = dat_ir, family = bernoulli("identity"), 
  prior = c(
    prior(normal(0, 5), nlpar = "eta"),
    prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1)
  )
)

Here, we model the guessing probability as a non-linear parameter, making sure that it stays within the interval \([0, 1]\). We did not estimate an intercept for eta, as this would lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models).

summary(fit_ir3)
 Family: bernoulli 
  Links: mu = identity 
Formula: answer ~ guess + (1 - guess) * inv_logit(eta) 
         eta ~ 0 + ability
         guess ~ 1
   Data: dat_ir (Number of observations: 300) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
eta_ability         1.32      0.27     0.83     1.88 1.00     3286     2860
guess_Intercept     0.41      0.05     0.31     0.50 1.00     3108     2765

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit_ir3)

plot(conditional_effects(fit_ir3), points = TRUE)
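
To check more formally whether the estimated guessing parameter is compatible with the value of 0.33 used in the simulation, one option is brms' hypothesis method; this is a sketch assuming the fit_ir3 model from above.

hypothesis(fit_ir3, "guess_Intercept = 0.33")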

The results show that we are able to recover the simulated model parameters with this non-linear model. Of course, real item-response data have multiple items so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of brms and I hope that this vignette serves as a good starting point.
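
As a pointer in that direction, a multilevel variant of the last model could look roughly as follows. This is only a sketch: dat_irt as well as the person and item identifiers are hypothetical and not part of the data simulated above.

fit_irt <- brm(
  bf(answer ~ guess + (1 - guess) * inv_logit(eta),
     eta ~ 0 + (1 | person) + (1 | item),
     guess ~ 1, nl = TRUE),
  data = dat_irt, family = bernoulli("identity"),
  prior = c(
    prior(normal(0, 3), class = "sd", nlpar = "eta"),
    prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1)
  )
)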

References

Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. CAS Research Papers.

brms/inst/doc/brms_customfamilies.Rmd

---
title: "Define Custom Response Distributions with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Define Custom Response Distributions with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

The **brms** package comes with a lot of built-in response distributions -- usually called *families* in R -- to specify, among others, linear, count data, survival, response times, or ordinal models (see `help(brmsfamily)` for an overview). Despite supporting over two dozen families, there is still a long list of distributions that are not natively supported. The present vignette will explain how to specify such *custom families* in **brms**. By doing that, users can benefit from the modeling flexibility and post-processing options of **brms** even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this [GitHub repository](https://github.com/paul-buerkner/custom-brms-families).

## A Case Study

As a case study, we will use the `cbpp` data of the **lme4** package, which describes the development of the CBPP disease of cattle in Africa. The data set contains four variables: `period` (the time period), `herd` (a factor identifying the cattle herd), `incidence` (number of new disease cases for a given herd and time period), as well as `size` (the herd size at the beginning of a given time period).

```{r cbpp}
data("cbpp", package = "lme4")
head(cbpp)
```

In a first step, we will be predicting `incidence` using a simple binomial model, which will serve as our baseline model. For observed number of events $y$ (`incidence` in our case) and total number of trials $T$ (`size`), the probability mass function of the binomial distribution is defined as

$$
P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{T - y}
$$

where $p$ is the event probability. In the classical binomial model, we will directly predict $p$ on the logit-scale, which means that for each observation $i$ we compute the success probability $p_i$ as

$$
p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)}
$$

where $\eta_i$ is the linear predictor term of observation $i$ (see `vignette("brms_overview")` for more details on linear predictors in **brms**). Predicting `incidence` by `period` and a varying intercept of `herd` is straightforward in **brms**:

```{r fit1, results='hide'}
fit1 <- brm(incidence | trials(size) ~ period + (1|herd),
            data = cbpp, family = binomial())
```

In the summary output, we see that the incidence probability varies substantially over herds, but reduces over the course of time as indicated by the negative coefficients of `period`.

```{r fit1_summary}
summary(fit1)
```

A drawback of the binomial model is that -- after taking into account the linear predictor -- its variance is fixed to $\text{Var}(y_i) = T_i p_i (1 - p_i)$. All variance exceeding this value cannot be taken into account by the model. There are multiple ways of dealing with this so-called *overdispersion*, and the solution described below will serve as an illustrative example of how to define custom families in **brms**.

## The Beta-Binomial Distribution

The *beta-binomial* model is a generalization of the *binomial* model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability $p_i$ directly, but assume it to be beta distributed with hyperparameters $\alpha > 0$ and $\beta > 0$:

$$
p_i \sim \text{Beta}(\alpha_i, \beta_i)
$$

The $\alpha$ and $\beta$ parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters $\mu \in [0, 1]$ and $\phi > 0$, which we will call $\text{Beta2}$:

$$
\text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi)
$$

The parameters $\mu$ and $\phi$ specify the mean and precision parameter, respectively. By defining

$$
\mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)}
$$

we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter $\phi$.

## Fitting Custom Family Models

The beta-binomial distribution is not natively supported in **brms** and so we will have to define it ourselves using the `custom_family` function. This function requires the family's name, the names of its parameters (`mu` and `phi` in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. For our beta-binomial example, this results in the following custom family:

```{r beta_binomial2}
beta_binomial2 <- custom_family(
  "beta_binomial2", dpars = c("mu", "phi"),
  links = c("logit", "log"),
  lb = c(NA, 0),
  type = "int", vars = "vint1[n]"
)
```

The name `vint1` for the variable containing the number of trials is not chosen arbitrarily, as we will see below. Next, we have to provide the relevant **Stan** functions if the distribution is not defined in **Stan** itself. For the `beta_binomial2` distribution, this is straightforward since the ordinary `beta_binomial` distribution is already implemented.

```{r stan_funs}
stan_funs <- "
  real beta_binomial2_lpmf(int y, real mu, real phi, int T) {
    return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi);
  }
  int beta_binomial2_rng(real mu, real phi, int T) {
    return beta_binomial_rng(T, mu * phi, (1 - mu) * phi);
  }
"
```

For the model fitting, we will only need `beta_binomial2_lpmf`, but `beta_binomial2_rng` will come in handy when it comes to post-processing. We define:

```{r stanvars}
stanvars <- stanvar(scode = stan_funs, block = "functions")
```

To provide information about the number of trials (an integer variable), we are going to use the addition argument `vint()`, which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use `vreal()`. Actually, for this particular example, we could more elegantly apply the addition argument `trials()` instead of `vint()` as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method.

We now have all components together to fit our custom beta-binomial model:

```{r fit2, results='hide'}
fit2 <- brm(
  incidence | vint(size) ~ period + (1|herd), data = cbpp,
  family = beta_binomial2, stanvars = stanvars
)
```

The summary output reveals that the uncertainty in the coefficients of `period` is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter `phi` in the model. Apart from that, the results look pretty similar.

```{r summary_fit2}
summary(fit2)
```

## Post-Processing Custom Family Models

Some post-processing methods such as `summary` or `plot` work out of the box for custom family models. However, there are three particularly important methods which require additional input by the user. These are `posterior_epred`, `posterior_predict` and `log_lik`, computing predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant for their own sake, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method `loo`, which in turn requires `log_lik` to be working.

The `log_lik` function of a family should be named `log_lik_<family-name>` and have the two arguments `i` (indicating observations) and `prep`. You don't have to worry too much about how `prep` is created (if you are interested, check out the `prepare_predictions` function). Instead, all you need to know is that parameters are stored in slot `dpars` and data are stored in slot `data`. Generally, parameters take on the form of an $S \times N$ matrix (with $S =$ number of posterior draws and $N =$ number of observations) if they are predicted (as is `mu` in our example) and a vector of size $N$ if they are not predicted (as is `phi`).

We could define the complete log-likelihood function in R directly, or we can expose the self-defined **Stan** functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon **brms**. For the purpose of the present vignette, we will go with the latter approach.

```{r}
expose_functions(fit2, vectorize = TRUE)
```

and define the required `log_lik` functions with a few lines of code.

```{r log_lik}
log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  beta_binomial2_lpmf(y, mu, phi, trials)
}
```

The `get_dpar` function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit. With that being done, all of the post-processing methods requiring `log_lik` will work as well. For instance, model comparison can simply be performed via

```{r loo}
loo(fit1, fit2)
```

Since larger `ELPD` values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial.

Next, we will define the function necessary for the `posterior_predict` method:

```{r posterior_predict}
posterior_predict_beta_binomial2 <- function(i, prep, ...) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  beta_binomial2_rng(mu, phi, trials)
}
```

The `posterior_predict` function looks pretty similar to the corresponding `log_lik` function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed **Stan** function for convenience. Make sure to add a `...` argument to your `posterior_predict` function even if you are not using it, since some families require additional arguments. With `posterior_predict` working, we can engage for instance in posterior-predictive checking:

```{r pp_check}
pp_check(fit2)
```

When defining the `posterior_epred` function, you have to keep in mind that it has only a `prep` argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is $\text{E}(y) = \mu T$, the definition of the corresponding `posterior_epred` function is not too complicated, but we need to get the dimension of parameters and data in line.

```{r posterior_epred}
posterior_epred_beta_binomial2 <- function(prep) {
  mu <- brms::get_dpar(prep, "mu")
  trials <- prep$data$vint1
  trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE)
  mu * trials
}
```

A post-processing method relying directly on `posterior_epred` is `conditional_effects`, which allows us to visualize effects of predictors.

```{r conditional_effects}
conditional_effects(fit2, conditions = data.frame(size = 1))
```

For ease of interpretation, we have set `size` to 1 so that the y-axis of the above plot indicates probabilities.

## Turning a Custom Family into a Native Family

Family functions built natively into **brms** are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on [GitHub](https://github.com/paul-buerkner/brms/issues) so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (`foo` is a placeholder for the family name):

* In `family-lists.R`, add function `.family_foo` which should contain basic information about your family (you will find lots of examples for other families there).
* In `families.R`, add family function `foo` which should be a simple wrapper around `.brmsfamily`.
* In `stan-likelihood.R`, add function `stan_log_lik_foo` which provides the likelihood of the family in Stan language.
* If necessary, add self-defined Stan functions in separate files under `inst/chunks`.
* Add functions `posterior_predict_foo`, `posterior_epred_foo` and `log_lik_foo` to `posterior_predict.R`, `posterior_epred.R` and `log_lik.R`, respectively.
* If necessary, add distribution functions to `distributions.R`.
΄@hA chd`5ܕlzaBbKp gG|'JQ뒦׬ ηCΛ6KCWş`dWzur'\>&ρ=lNv>=bwgnsw ߁,r@d '~;:_Ө ҫ$0s"S?>p=g_=,{3aj$ʚ01u^F99: yP#Q@2WN~"HI-oqm8VԜogO >+W)l0ti7AIL}b?pI’И[2ACj?9.mSt^b~GX aYZuoԖ[idzb~(u4\l:MhbD2HDh}z N4.9{0ׇ7 \8^) _n׍J۵igFo)M? >8sMWlwTʂa"(B wXa*#!JŚ:u75lۣ@8Vg7n oQጅ=獐:vuxbӲѽgb54ɯd"aMB4.mNtU'{,y[_WK۸CJ)P+@ dՄb M 2nxvS?XmZj#^gLendstream endobj 465 0 obj << /Filter /FlateDecode /Length 497 >> stream x]n0D )y/%E%*!8}gv= \]˺\2v^i}-k׻:[sx1l"7:ï'muCe0}XjsJ4uڝ;U5)uhkoEJ +S`)ؑv*Dz]9\*%]`;ھH=m.l5@S1؎E=Z`Ift*/,`3 p&@#hH)&(( Y|FYy3(l"gV̼Vu:9999X]N^4[ݑӕיׁwҽ_o͙pUY G]vjAlNZ9kSVZBA\0 p! `. `0 BA;hÙt>?WFߔoZ 1endstream endobj 466 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5745 >> stream xX XڞɌJAA턺PwQ޺QZTP\Q@6 *-LE@`h[Zmu[OsoI|C#bBC}÷:/b[ 1~@@gΫ=tv_rI_)0|Q:ףG/]F|oI[Vzxzքy>a$)SQj%5P*ʅ^Xj 5Ru5ZONmSK(WʝB-ޠQPө j&5@Q/P5Xʁ/ H UQ;IAj-)_:R.02o@ 8?p]63lJl#E>h`/}-U!!ۡ1ƱSQvd9g@6 X!^t3NR~g2$6;;vQm cT>C#*7I*(;\Sx|^z.0OTa 8 V9Vtr2!d]EWYfB4y4E5IeIJ{ ch\^Mjxӡ= GTwRj8Dh觿~Nñ?[}P,&&أ_#9LTL|F=W~;KU^쮥y\2JFanU#tDOA9}adgRـ[9%GoӬb}KIV$E͖q&IMzSjID!r8)ì3?1!ގiS/_SvIR68k:|V8 CC|sׂ/]A%v9:nJM5iJ<bt8χ|\h?-XAAYfexz: Xh3B,=/iU"oGw sfSqBDArE(Zvȋ(%2t\&mf~$ڃve'zxFR)k(EiY<ͭ]*:Nx c + M0NsdvtdX ُX./HqW (q醨m"Q9ܧ håVIk9]>sS0j;,#IBm9Zh ~B4A1#{/'Tz1ei{`Lέm zr7#❋#dQfBB$K^#ۊL40mUEdٕsSJ3MOS&> G( ؙ1"PuT}#W vgW}zX[w5]xI}iq@,z_ҦD%e$tV1LdMR]M]y3GZuUk:eF#F-=|);l@H|^BP5HlxAI*P㦠*D0yV`8R̄j%-QqEpBc\]Ȟ…eQJ, gE4&**^iiU DmֆE)[TgO&g!)9ź4`R s X[UM0v>B&j+ ?]0*"{=$?^ Z Vrh< 5gi[I}{c>{ B1NtS@inӗ\ >:

(5sl46zvfi[p73zȏw\Ʊ| 3],ċNYӸa7nnyó\>)! Qd"/)ԟ%GRAv4T;9C~Ukzr`,1։Q:r*#?bbn*m<6$an~3++M]UqlPƈ]_{}Kgj3vɳ+DR{۸o Gty0n;/l~78йrؘ&?.O| >-rELxT9V@LoNJ/=vIWlLgd+Xai`Q d~6Yv`>WAL; uNzN P~<(6SC Ԉ<"g@}H4VD{ĭ w_NѐT\Ζ ۓ#}O|pz2%=i0;Mf J-@Q I\Ϩ9%7:ΕF)sR Nhh%\Xo2cI?܍^(zfIbVJciZ<MjE%=,O$UƳͲ'mȚG?d#?ܕ`$]%ԚeZVD<_qd-mf=~Bk |NFV`<\TIc(؏pϰ->Mö}T{!'>(v8`L$|aXz4'{Y)Hrt\c[ӝvC͏ߓfH<;edl=Sr#+B:+p!bWn⚻|Dڵ}'G[d4$d<oƛ+oMEr\\\[Tx4h䫘j|rhY_yQ.[=^{/+1:OUx !w>ı#X[L`MbT4UXY=$E'DLlQ50asK#'+G;s8< f24|דaFFht&Z4ZGveɱ,J}A\d2_`&9bR&jΰk7}*4䵃s{߆ DZC!S^H0T_^TjJv.O/_?ђZ<FΨ *k8 SX7cQ߲^Y^Xn*3c.8]QMwN7G3G}<]s폛>X}{r[oߛK^TM Q5ք rWZ4{TM8[N5@a1a1aa11aa1$[.Ǚ#ʌ6۟%4#HY 9'yiejs.;G.}'ƻq\'+&t5] ;Ӳt,.u{q6jw'Æq_ؐQ\A#L, a`o0ٙJ]9%ƫ4 ='.~?p<@E]]ĂF®A <˒f:¼YhXH(,X!$Ȕ$V1WWxe.IzCw?j( @݊l#$FAzR@ I2p2Bx>k,7MI^lKqbߓ$(GyrkFRd73k3y/R獬ph"%CFN#괤a-=lݾ5'[iLX}R]cM/c??t OkDNh5$hIڶ'FFFVE5Wfl|7.GFSUX54@ kZqARh qGTro%EkOɛbLDˍ3uٛ$n2gr׿ +a~K4s`>,ӏ;jnj/p]Gt$X0=:ne:}I:px&xkw#&\"GpMjyA'6 m飹oN&h&p&>6 %6$W(}7<}"H?EI-~h1 L%DF? O=kij+ZkUt9\KOd%oj1:YXY_HpWۄڝ`THn td950lXL" ZG7gA|Ң< i/4ʍ2}a T1i4+tOC^AQs<<)H+"|[I'-*ÞP[XT*UFG~>I9yzLJh0ΐC>o.r4`+Z f^BCg 5yYiSܰd5a|(Nc?V6 nJk[P۱x.O`Fbs?^6nېyaSH+}|> stream xYMs|m)|),;vI]kWbRIIA;C~yy=3 1fS>zzf^^B$MO>pÓke&Γ'n e4Υ 'LhOC+,D 㓏.JD˔'9wDvsK^D}T$,"aVp1Cჱ;Ҍ;qeu3B'(P:0@H$Nw քę_j\`eT&f՜1,gU^i9r5jx dzS[Αɲ,7jwX܁R] c& ηLF,L@􅒱ѢhK )eC~'5 !AeWd_"78>tim)ESs/=v)w*9+겸O0_.rQNR6njY,lw Md8\l#s v8ⰷKtVݼnΔ~g:g,S{gl[H6su0P: g .)|D(Wu H}6 mܫӋFJ+JDAbM{Ɣ] X<Ggt5vI[_{=jۈ)JǁSeU mCd"W(bwaU.\sR3@q&M*TeVq/,#W2 1-]\FnVvl*<%#>%~^6wܯg.q&w:)๯kônzL(vq1<$%q $702ib5qYӬ^}5^#Q5,"7gN(GzRԣZ1>W?R#&̢eߓ;of8iG,GQ4PA&E(C$Cv1vilʫaJ=@q%EjJَ_&0+mw[HsVQJ"F!8y([Qch Ro-tP:^]~+Hhf01d%@َ]CbbdE5|*I1<p0I 68A(JFRΩ٦8-ٸ8/AĂhO֣8|AJ(j@N΁ Ї |p9+Puf,Ln;#\I\Kr USrkmHJT leP۝MPOn=l0y+#͍q@;zr'cm`{[,u5".Z"iuلd}eVTr9|^~stz[ R=:_uwr'pGH68o3}jR@Uzi*z[%WhV.'Gș* A4W(KK X)7&;O]/&?hZ-~R IҮ1Yc!BEnҦ{eE!2TmtA4ZՄ(ۙ@^ңzu& Q3s[pӸ2S_FH$6޾58j*GHo!9$v $4&U&3]6X}ږ.-p~*{ {3:h%^Fdn:Jܒ֪[[Eir|Rw{^x5/ BBLLۆV :(s_S>qm&tOSډ10Ċ iώw&O?nbTub-hjendstream endobj 468 0 obj << /Filter /FlateDecode /Length 4690 >> stream x[ݏ$7nOC㞪J[uA/CCtL1z#%Jg`J(T~YtXto/7".~?zھ&N /θWfqi' cXZ# ]oqٵQNt}3ONQ|ԭB)^mשa)m7/KOf\!M/i;+d˾-;ow4ץ{mk; |\`L'j֌T=&4|ZĔw͏>`isXA: N6x&1닃Api&p@ľ͖qE<D>ܱܱI?eC] 55Ti{L/n8'ŵV v:X yOCjF:ʁ+k =Ox6eIZ(J&i0/AoED[s0O:Q S62(ADs[|q,66@zLbz8Ǘ2~Or3 9iIVts-)lJkALjZV E}W@bZJg`לB=t.M~וx"Ly?ܭ#Uh6Cv5rHݜ4TStf`*p޿tang{|2OP׼)'xގwJG{VgHgtdzaFY |`IJ x_gy{ 2L¾za3@Z;@-Vf GZr=$kveya7 a!Dڀn^!hL/݁v+pl`4ďBY 70_J:U E #A·K=cmuMPb`'zT(c>М*8Wn`]Y,=/Ljj)_0^mãg}%1/ Va(os$O̜F'0ܟ3%7cR4q _tzؓχ !o*% .U&.'m=jh#CQtjUp^=yd]@E>c5JOG_IXL.0w0kY0b<8EZfAqȬ~KTP㑆 _U4!ػp',FXP2aeϕq\ak^= yKF!Η؉6v|=1*HoҁDIJ³42T9\˸C"$';9/5!_e8 doq&&ɵR+F x$ l'z`L3QtWژ9`+1 f>\ڟ3;%lN0mㄐ18e[p6Oy3JJg7,NA#j |Yu9eR!g/Z7qn[+YЎVMZㅛo^.+Lu ^>4w D]PK +`}Z/!< @ij!-@Yp#Y^|ϰ,@ $zv.=Rl kwM1@#X 0hFU}W8x-dQfiLz뉼g-?kA<¢]a8B.?PZJcP϶oX= ȣsWtȹU KOGEB vph~q6> .<2[QG!`K1NOL5hQ fS$$=D^ h C;K׌͂Q:$z v'r)ҮAŏJ"}HzuksW`f:su2Ґ sa襍z+V:ik[;E>l:w|!iSɊ ug $%$XI Vt_}k_q}`9YB#;/1uZS{?c2 Eԁ:u IA^ q<"53 x u 2Vrd4 [D ۆDiEfXD= 'C mZqx4ioSyTpD zLX{ do\EAudx<# fLx5oFT(ԜumkjWBxׅʻVBukt0!2_`:K# 8!b w-z`e35߄uz>uFC8\c6`#a "ց1xN[=ú2"`]j&֕ʂױ/:zQO}HϚf8'scQ8#JȕROԓ0+,*9d\d:ci*qi7]G8Nߋq-d9=PhًkR%FˍxkgE yYEtA|J+ ҵ$W".D]|br 8Nj=Ȋ$iUt ׸",h"$\a W+̸&D\ W򂌟WqE95ÕSp%d{s%Sօ [-Jw6i۠fB= ['c ,miPqV%iަ}2=2kV/GwWL0@ѡ8ejs)&TL_Pd~!YOe~QG rHp׮ݦ>+XץѭW<.OyNxyuVGz™'x_hgHSjwd@-r<=+c&f+3?_-w*9=[Ne RWow`\y,u ƕ>B q a0 `\9Ќ0U0 ˈ㩙`O0^G/ 2~B1xwX⚍z>}?DɻHޛPk5 5]*VJ^B`^/1}u o_\2-j,#Z:]Wr~Uh}־i33hw^>cbqb` _j>-C"?/!-Fwgo#01(,cS} O,t)y#K~PU?C'g@K8a9K:S7mŇo6;tp :[@#] 㭞UǛяa!;* o^H ٨\ 4/-w/@X-I}B=p::\jxo< }[ =mSm6+0 ҍH7~p0Jh>0Rgg0d,bGUeTi#/>mD4kV%. 
+ (fendstream endobj 469 0 obj << /Filter /FlateDecode /Length 5312 >> stream x[[o8v~/E.E",f/AdFlk.*z s!%Ų ?$Q!y\㢩Ţ?~{,~two\:wj8yWBVF/j7۫O\K[7ʩ?]7uӪN4,DV8zwMEwQ/^%2 o~Zj-Kk+kgQƏ'[xG9Iɭ>U9K%,q4<1ƋnpIa4P ;Dg`D4]7WWmy E#a&ADǹ[T!Ӭ'q1827l1dIh. v V+Bgfm5Щ4r9S $ে,jaC"&q{|( M]~5(R7tALfJ!'R=] ) XGCn>U:`Dwm+i#HvͧJւ"{h߈V, >  V ؉Ι]ZA˯ u\eG0=qLhelXYTf纝SϒI>³J Bm&lͻöH3ByNx1 TIG&Qm&&elH?=Gl! oy{>TW├Rvu43ABO6;)#j#:W_"*|~3LW $U'ӭ! 7{^ LFywp2L"3 iaGe[USN6( @ClDjdjFr(δt;hPwz鬮6eJI )BӖ)'81р]J)on%J^us#2|V#;'e,:xxηd݇1nYc?)a[~y,/*Ģm3*<V‹0 M<AU>aZ$*qxWim>p[=y -Ҿq.^*4ǹa\ {*Ft, *pߍfnImկg]^ᰵ8͢+ ʫ4:EkU3J4jx 6iV6xᚬr#[EA;^wGl_>#@%yPsѣҜpOƵ{-]{iD&A'U/Dmaa]g`|Г.4r Y)pz700PmJd 6Or!i{Si Oٰ{i6gv/9ğY-YgҊ,Jv4=|Cu=7ْ}P+vٹ:j9]N8v{:_ Oz8nTL@xP;اv<`NN R&5@Cee} Uڈ̫Oj$(`$id^91ͳ3g6G 8%GxUJsC]y9O:(iAINB(QֿOtL=g' 8%3-RV%Zufg¹k7A!TO"c=$9-ew]YvG}RA75;-왾h P cF&!ٴGؤfmT'o 1~NGxG>-jlaaKQ*]aj>) h|) `iV]/i,Vm4>jJ/+j> ّnVU8:rVFs xxHvn)*(l fŠ/) uq]  ^ !^sQ)MJw?|Cl(|DCUdϪ-M$qkAUYl=؀=M.3p}2񀦣T:@#j=Wec21DoΈ50R2' MO>.2qHݵ1mB]]z70C)gekТց} @adP!HI&'\krU35I2 ٩k+n/?X g 2+,p%(}_DsL.,reLoPy:!4 =PE,jPŘ omS:EX-QԦ^;ϒҒByK|Y sSfqc"V6U'ztі=6"^ M}4R*".r~)Ľ8 ǻ+wɼJQ3IWQRSؚܿI!}aTS^ۥAdp.&=(B"lBGw&m)eAm|p7UD>GG4&>c˳?oqU)گD/>^ɇ $(Gƿ"K8!S5<9\ -.<6Ri>6oVŚ98_Qk_cAgtyW˱^WC#X2%G׻}]~ȣL 1rz*:P>MOq"ss"KsPcyM0a~oдLfCr294ŷWo|;#mˌS.v ϾӁk#c @K.pRvB+YQO).Gӣ.di+ލS0Uc:tFH'$))hT}s)hN|ľSR\\7طO,h_L]^Q, 5F"^eXݩ漞ygGpgƆkTol>s\@CL/lLN&htr3qoriQxgN_@XBzLx)̀ؗS>c)|aa׷QbjLw՗sф/sgLQR*,*|}#t_cZ!eEpYw:bP qPj;C]f ygel%𒟛 g0`QPuIH۞5o|Lie4DQrxNCxt?ȒSY߯獫@Э/WWV4 !H^ɶgtgs]-ftrpBtz n뉡_ZMgKߞ8o`c L*Nݟ|:ВDԛh ÏmI a΄zN&mu7w>0c5> stream x\[Fv~-n_BmF Nz`7ٍȃ;08-Č$)̴s(JG$r\smv7Aoo?ۯn7yUT񆻈R:JۻMV.Bp߭n/0ʉaw]TE\H!BY,yQz!m^E,d ʚ,EiR)WMڭok*]Dž+m!&sXBBh!;i*]]z{nRh[eo䥵%~~hF.qK";Ĥz+/) P째Mҡ|dw 5UN&nhMjQɄk ֳ}50?o lwOU&UYq^|(?S?A"e\$(~طXI8 >a@#&eFTY{/``fd خ7{fV,xD@fIDV֢iF]fQ76YpCЭO3DTat`;XoeI 2A .{~/OTܑmL64`~+zmw=W^"痮ʺ!vH>TzJ>r2$QGWi+Zp|Uu)!b0uqdz\6u_g=[\IU&vr $)PuqNNn 86D #ۓu1A0RBUp$7ѧ> *e2 bWXQ:Bwj~#vFV vjO^`Xi JWs&dNBr'^FK("`aD}jT[r@4ѯUA-B9th؇ւN|@+uSr()Jevj&׬ @RmdHĹ&Ag)F[3{H=%2+4F5QL{VOT$QLRB8[Q{78lީ* Hp{ϙ `0` Ydx^5av0KGtrI|Oo605nxqC;̚O^;}@fQ' ]<g'-H`tqm} UP+rN y2p }3Fu4G GZX @OeaaE°` xmĺܔȎWY+o=Sσp'^}@ d~0D[RBč~@) a9 I#:E8jk .6PMAl|ۉcA>hz5)h θH'({áo+sgrf}q/L ^%:g TL\ pd6i~`W@$W UQVU^Z?czh?Ɯ@61uN2CYA|Th;\(b(ƚuܫBnZ2{$ S YJeL/8 F @~jvӬ~`M].@0I Wl&˪䶲cD1qatB]鸛~m.tY\ip1#_bbjPZ׉:?6JN>gYSɳFJN|&o)$+PݺdaJnLr6Lф;b'C\kϭf:dUI%Iܘw~#H_., i8o(xR!%" 7&Fy?yiM.% )`ggRB ^X.Sm_8PCsHq!qell,?3AB(ۿ@,UmOUC@x̅9ן5!+qBUk2ɔ(P?*͢RL gt>3U>3e^z$@F@$B]|bVmA'^!yoUƒl05M67d͘L^KPǺ%dL8QULcQ:,Shc>tR5tن '1Q1PqUM`xN0V0G WHAf6d1DM#sl44u娙TpY@AVGbAп*+>=]*s'vr,vK&_cWMwgo ֟gX.MSM/*=1&DETS.Ͼw'&Gc _Y|e DX©x (5}wڊ)RI|JfGNC0 `3 D&Uwq`TEG"Px 4"ji"N31<FÆvq}HuNbbc{\T\ɈMŵGߡEe/75 >ei#vD]GS#t%;CڪCC84eGBFx%O`UQǒ9򑣓BGHyɳĉC-0D 4q5="S#D4o7euIZg!"϶ v;@7!/10@aa2 $ST\O\M|WnRm>?6;X3KKq+۪[Z*5_P ;(X]&*6<ѵ4G_f Jd4wvO܍T%w32TIwg_x'W| nXi-c_v*Oq3ůܭ)ED<L"4qH[sw~{CFDQdw2 gZM19 Sms!WJW\m&ۺ8qBq!CaRDrI;#Z\W] BL0 "s.J/-ׂ !hFC{ƆJ]yU\%W$\\$ qݦG|b\B'TUqNxr) AIk1W>u䩙: ǻ* TpQcC3I/4yQkSV˷ hxũ/: M<_[ H 3 3z c &0KCkN\IUpqxs? 
JQtD!ƷΨ4#_Tϟ1Fu8c;Vt!\V`}S*б/bi;SӔA]Тp'M%(BP$W {gD4=AT?@4b IB;gul-NtYQF~=Á·p cx5)zn=5hvm -?sRK{+ų}rIt" 󺉢K؅zρVj:^`}zH0W|"Ed4o%ʂ]_J)YX,31v,)B~9V*Sn[v9ŘQUR!d!v{ӹBW$:fڴRq<=Lb40Yhu̥t< *$ap: n@!=І@@%HA+.WE"L;YU=v52 hBB<6Qe+Læ18e{tƛs4Pд;INCjf!#D=eT6t7a4E\GA7B(]Rv/V8cTNJ~q?}¢NUfQpиN}Q:(%rho?M JNTUy(|w>d-hl4s >mWM1\*TyY!A tAY!|냿9@+U㐦MBLmkf'XpZe%l6!EnC$ *1tJV 0̈=Uu}SU2&z]n@g1Z3|];(,~h׼0+|O#T] vf7w<2s[)so frLԫ"]4vdO2B+2ssdU d\D(0%gASp؄$`b ۄP 6C}Q+0pᔱ!fN:ŷn1KyЇ::3%c0pe:X?xb>r&)r o`&|v0 L7}SDN}k\9 mEF+(@(3.$Uvv/>4j7lϿYU{:B[{Iq(6.txta60l';#:JŜU^[sH48!;pj^1J.!Qyf ϦIo`mBs~W#5_Uqd=x}R??JpNjgOxh2 돁/o{/t!)9 FO|{^~mWR rTLC^%= _9O1d'%6?.h*% U'.1+hOC"˘ VbD+~kVއBO>!>?TXG*.XOQU:. TbOm[_('7׽i`*ƸxMhs;%Hߊ?}2Qef`Էrc|%@)]3Lcy$TxCHٲĵrv̶Vun>AJӦ%_H<0}ނOF?J9xTтLɖ q|&9Tv iu2Wǿ%<  Zn<#KMW̭5ݕJa0m͒|NS h}L{QI9Jz5=՘G.Y}ȁE9Nf|O ?H9~~쇐^J@)0^uqC7/c PL*u6i5z`#CMF+;aAO8endstream endobj 471 0 obj << /Filter /FlateDecode /Length 6484 >> stream x\KsqmMè7mM~PhpfQU@UC̈́cC4UEUEwjp+A.wwWߍ_ʦjԦTV/qezwc_KYjT>/2ʉ)4;]_Rqa?iYP"liWEJkX)S?w~-s (\q\\+>Q{ jQ$.i;i[°Zv7a6+aj q)m H*%+Nh/:4@fM?nM!n:;U)jrcQ.WNi)[\?NbJLǁN@_{2T-eM(Giydz!i_AC»dUV~#aׄs%p@@1m+48(5Qáxj*>3@T*v'ac0ӾR:X jp.!HoGp!yꏾMEaloN|DVᰠ&Zyn I-~{:QJ۩O6ӷ*Pm#.<]}ֆLu+[]_ $''=fI;?uw7YJQ{B{12Ln'FA8(*bM)Ο}xV@{CB}Sr8XH5Ʒ6"=Z`e4lC)ф,&aS5@t&-Pir; ;Tq%ovnO(n8|6h&rɛ^ `EdN9p\dOq"?]13<Z]:Qu@LWXT~ce*UNɅdIkej .Tbw7~GLaS,-{2XJ}\рX?5tmKn@[*CG -٨v̷ҧ\[7ʄq㺕ӥ4M2" \'CrJu%K?TT_@)'8o%J+zK]˪[.CU&@4ҤzdG6'za] 6=H*0} 7OdueMEl}{7iif)41=Sn3=;+8Uy"|3 MB n#(y o3(a.B®Ǵ y43zk<oBΒ $nM]l']۩plHf{4qnÓ(O4i['-j8'NB6h 0?N\9@磡;U-#@ݫI˃R8&T*.y~O?s0gHڧ /ݞWU. +}!cHHp'H964vޝ"Ԅ4>(A`Pp@6j Y i }@)ݶM ixE{He'wjd;)7璞luI@]F×+#)0ظ/pdJY녱Mx%[J|&+ >I]Z0Btd`CC~9W=_A5,h-M\萩gNb1 =3{:n%ɁBpe1 ?aȞfדTNR48o&yS,B8Hȳ1Bs8=2"տi_(37Oy7h\4\o# h†q$źKiJHq7hyLc+ڢΤٮ 3 ۳cp:@Չ?)9":35],i(sm( cC+tda]e?X3ҝ,e5ۢPAȶ4uNk6CfY:̶keUͷ !! ѯ/ LSZ' _Ey#%xG,i`IwLGrx$a~MG:^?q 7*`~ 1 yF4% { SGAɄzJkz,Il*YǓLA܆^U,BS;NC۵'DWV GO5 JteLKUm> #$&9I_.)N.îƗT/hUspUHtCGQ C6)d+vh8o(Z ySDhfodR's8pCkx pZaP.1YRijc|o 5RJ b6)iW !pU2LNwKH(5xNoeO7[82^U7pivlr"} >XmgrIVq1폏W˥)p)@ C_MV{:g„cßonPl ؖ 2uh)*fs%hv軟`>!=.3\ bJ^)}hEoݡ-;E:.Sx7,wgwW*n?qJU%#mRE1z9AJ\@@ms^d`\ɂ?k|Lu( 鐭[̚Sgk;{S>[OypWЄNNݍK}OL&,事_ą3BU]." 15Qg5n8s۩8Ma9jfow?N pۻ=VK?2^p`]_.J' $ʜN-8zm iV!nV'e5jI{M7>HqWH qŐdzDGͫdƸ|Z|ڇ`KO?[ڐ '6p xw\nt~S,%4bB.Zc?QO_'C}y)/b346(3uM3f: t1w*=zؿLǀ>ֲ^pM_ئ5LH ^PE+=P\5584AZr C+k(V")N6MXq>G&t)k|fqP2,>a L/CPN/eXmQ#)iMř<*hJ %&z6>N`n2QQ`9>t,n>[ԥ#wo=LRmmzwk%4NRA^WTAQGpm<~erXpB9|aVO9Yf"çJH&8RXf_B-ӳZǸEvϰ:u(&ۘIk7yE)2ݵ;=+Ft+p0*ƿf XuXw= ;lx㌗|.0G\rR>2AVv 7s*T>tK~Oqhn?૴FTUnpVQV|=l!L2w)O@]zЉW{5 lTT2\]hަkEiG jŗ%'w^R419LD:fnl!$Zmk*,$=y9 H6J>[J/a'q?" Մ(hj*lbF3+}FPYB%}BHڤ \GSCz˙4Ը7:BRёv(Y%m,]+')9{#X[r7|A_?4QAVc'_eH¨& JO ,VH@o'Ϟijiߕmh4z61"ֆ_w/&z+?Dk=遼(_ h{8XhY ST$ c9MH0}қvDUMB% tUk.XA4OO?ZX]Rw`8O^?ʺvq7p5^J=X oFD[ XƛkJKT2…䍌 Q7SLgj 0,ߤq*NZKnӷ= K?`;^GrCv)Qt+kh<5,SI~l64+xI"|O$/,7wMzJ9W 3S@ rwlZ1q4yqɜ^9`%ij*&ojş-[u5|ʅ\Cz@z 8I6όc/%;mFJv5\GMdbC 0")zPw_72r\oe`'G2u%|;]! 
z}:^ZgX53C B!IPvP=|o{O?AJVqu=Oݷ[_x`/c̕Vʅuj;|F)_7a$U]?J`k~kPeDqf뎰>yan.4_Y\m=ޡ3"X]#DCݰ$Д\'rwtA%F:qQtí~{  1k9RWzCdThwMj|'u%ASp;x>lίkY`Ĕw6|ty}fendstream endobj 472 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5333 >> stream xX XS׶>9E8F֩VTo[J T<ɔ $$$yPCJh[cAiwi @o{wk_8QcPNNNS$%&mߑ"w؟ 'pO:̟{ry23b:SO<7ջɔSH|I{2SbӼ̟p\{Loy;v%Sbw$0;0ILcg&%z+7l^aݦ/f(jIbҞ)i~"񎌝k3wX!&68nS| fϙ;//I,yy3^˫ͤuT@M6R&j35 ޢ|-ԛ\*ZAVQʏZHSQkTHP(Wj"5r|ɔ;AM*SS)gj%SK멱Tv̢1}οMqώń2wi9!uęLvt뙼}k,O2eٔ)6869>T0QP&JzwӴMGz`ǿl(ɶrm|S{ H #iՏ^k୘ u R@Nhr`ޣCy^A.f0 -׆VKR,zm㵹1:!`f=}`V||ހ7 FŽo> \SphEǽlKcSI<_a9=*[@-xeX%n"'t7ٵ㠢 wueVMe}ْWW _clSሯa^z|jC<[MCUrzL"jwi(}f1hd"[#782H'ߜE\t N450( vPIl_NW@Tbeˈ 7_n3`1753t+ #̆Q9A~E^ѐ/҈eއyȓVH {CLDt !-UldwW7 /`'&';Ln `ey b@NXL0`%6';;Mj.`MZsBptoG+F/f>۽Q5 R_/G"x&894pN4tLPhr3ڨH{_Nu=5O<2CO1H&=5tK/I8}7+"Eg%:?ޕ W0|( [Z {j ?7@BI1A.}͉ۖ%K.i8Y7Sa6nhl˲ dwr{疓.og,1JIH&{y2}4ԧ4$:CF/D-yеWAaw7BSőI>^?pΣAĶ mpfMtQ0_|cCv&ChAH ^]Wp.,^^[#IW'S e˶Wkً DS~;_GֽpwK z)mnP\'Jg4[wޅ/]djo46u=bRsI% ~2O ?1eg X}𰡺Ȧ~Bv ]4-GB89$?lQҪLTtHZ$b\{=NLrԑJ" iJ(SDfASEꆼ֌:Y՞Vk ŕ#azA4a$RwP,&]C@7FA>JN(c M.p3 ^?7o^ٯ6"x -'kI3B̨,9Jw"9=?IT-\?V_{7G)61 οʭziҫf)#⢽b#A3,3B]bWlAd'<"ג} MA4ۍ9ɩz(cAe2͔y92ycd C]={L\( /%w/@O^OW<x3h3zuGn"OxH@>=4s΢҄VCū3!eMNJ*.u:UP<6񰘹 + k.!9I_ų"ѭmOHxm'`Mjݗbv~[Π~8smC%a; t1uV?/ ˓=UۗADjhH9ѻ])ձqI { LaY %c:3;%j{у(ɒقu7/TOn@ghB{yͿ7LiIc'^$Ϛ?gUIѕ'9 HűOV&C$MR2Ng n\_l )AhJ!BHE"˗k2VZU1p4^8=6HCЕ[9/InHW+#;`^N܀jA3jYEҼ]%ȕ@A&4t_EeW +u+VՓzr8i ?~=rCHĬ)˫چ\F4IpT|#Gӿ.β;|0t\"ɤԃ|cg;$+K'uW{?Z.,{*Y^E*cT[WXTi`t|ez`lyJV794^MnEPbT[wb93^<v-]vWdԱٵ}M-b Kp8*o:AҮϗ:qP2Ь>X>7p>%Gͦ"3E)DS8 On=6__) JS$%JI-Ks˲H#%)YitMVR*!@!YPQ"+ɩu涽P=H˔{ވ޷{ #&"D͝"$ľ K_F-Rۻd"@&x fЬ5H6j;Ш 7v+ZDC6~ D=)w -^l(qFOj0,ŝ@tn )h2NMBCU\)F q WW3u rv:p@E8\PTm"҈c-;z:n^$GE L΀-I8]u`<;T%B|Eq-"Y~&$X_~2>*Y%IeQh\N00g__2JJ/RW [F/|S]RdW Hl`;n9F`Rkyy f zXB[]lr:\'4?ZյW_+-`[:t>endstream endobj 473 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3046 >> stream x}V TSg~1)XߣZ=nXNER\@$@BXB@oXe] "Cju+oXv:cc3IΔɟw]"c%ƭ ֨5ۮOJ6_$!LƩBA؀/1xyXƣ(|%6)v.hӒTە:-#_N 1$ǫ|ԱAsȏ*ot2*AQn ۰bUCB7̜_ :E=L&%VRbbUvΜ5{fP* QT5 7f*ZAVQT R(OʋM-(IM!0QTE;bʈij=S<,%2T.M~,92aQFUy{{2zjћG7~:M " yD`hhnqc u[yPpdCxl(p$#6z%qeh֟Ԑv`V HW[H&Mn+ZF~WxiYv89GWxm&T(A%X7B4]V&r|%p3}3IGȲ;|!JW+YكcY%'lG;{Gܲ V~qʞp nY$ŮH` ȓ>5"%0͍->xOxTk܊ЗО\s(į&JfwXH7V̷9^*Zuq!8O~"*;b%ϲM&A6kSF:7OjmGMue?<4^m5x6$h<\ޕ}l e`mP*Z(F/f+>g W/ڜkynP,LsR}c ȉcDcQēOeP6Jc\VHb6QnqwH nsjSe}/-٥Zm,jѢxDɑ^VtgqM6&!v߳!= )v;aLTm~)}k:,U7ԶZNX'f2lnBQXs:AA)1F֙m3)4Dãh{Lv`rBCh>$'tsW~/i!;$9Kbtс 1_bF3'y4 ͻӎVA &t66u{ Ӊ׬̚=\Ib،gh^V}t^lb^Mr %eP ogL2%oAWsAq?bZN*()JHFivvz6 N_X]M̰^łr}V&Ml1=j0/zLo詃k(03T964ޘxZ4uϞs<<,D=x\uR/B/eaotew\0QE>E7B,{Y߀lX/5g) #&X̙F5)Sw[8W_~! 
ɷʖ/=ŋPiWmϳov)lNVWK#+Zxi%HۘL(o&I', {Xmz^k~385 $ʃ<}uS943}Mv8zim{(ߛ_&]- O[ậ VST,`@|]2gIDjFf$C޿]Mk@ҲE=J[v`~]dI?5UVKs⪯kpömk9ӻ-}ẙmg8yHE"xyXcl?{#K656;屮in0Ik[Bܢkht:Yg47[ D64 BC/U\%/MuHE=kEn7@W#=FSJJ(E] NL4y"IPsI9xHuoqc> stream xcd`ab`dd N+ JM/I,f!CǗ^ Ni[f0v0wG{( ̌iE% Fƺ@R!RIO+19;8;S!1/EKWO/(B(1%L33,(~DŽ޼0Oqo R~s,{_7a^\endstream endobj 475 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3054 >> stream xWyTSW~!@+;/iKGmmNuTj]pi-[Y  D$d%%!a HAQQln-]l8z홹sfw}Bx^~z~ٷk9\T`ng:F+g9ؒ\G0-%QߨgxQE+bѽo<Ó0{Qt+$p&A30 >W݇[upHۜԜk@ M&$*6Cv7x,Juc],!۫O ?1Ak3 Sםpy%RGo pD51tctp }΢hQr (4l>]pVh-FjTwtڒ8H_^D*r:Q1,Ret12$ 楟|;g1JD8·bilV64N7;#R^T-HH ڮ/93wȬԀQ ]{u&(*䌆]5v#e)["᥎Vn΃;ҋ]FS=|â!J`/40qR;z,wZ&N[:}շ}ΡQ=K/ݽwЌߦ/|% G9\"fYp)89SF$e+ Y/1)P+| QLFw&A0ơC_CNiKLEn 6kd(=*&r,Vن|?%ḷ+PVKjA ̮ҏd]Fg.#V]覑Ά-1vB' b m} ڊ`*<myNn-2Awٻ${^H1Y#ۋr%yXyqN jAH#59Mw*e—OOxp&f( Y`3'Ӑ&]mRf^inmNW vz`;W9TAAB݃f~wUxZocQUkW <'S(v[KM&.#f itˏHt&p#8^4g>kB:{-95xfREuS#nj=0]m޺S%]QJL* TQ2<<ҽ[$Hvؠ̬3ꠈɯ)jT_ԑOI;˴.[0%l˂ F8FF]5 $Lܵ(M~BWM܆5LIPbZ D> stream xT}Lwk&ul*N9EE T| *7*eT1Jd&"sQ)t+_²?\r&o{}$$"$IiDlBY/-rLo/rX2pvEF"$65 6n}anPSS)fS(´*>#EUUZYh :?`X7/ d2+z^0Oa0+*ިJUD JJ1HӮ[oPX]"BerJ*SKqWD<@,'‰""%RbS7!!64]5⧒+.:G !$/`d<y=*Oaé<iGQww$T0*;jx%AŜ}b[.ŦT@߱7+g{_Č" v5h07,xʄ 23{pztI^i}%|oTL[첃WHR^6 ٢ 9='iK Hz m,G ]m=tcC,,{5:~$f s٨Kt# QLk0> stream xkLSgÁJsa71Q1pJǘ(ڞRnEo .Њqe-L6w#˾e[da_*rav?.I $Ph5?"v/M zd:}AR9;ؘ]X갖6hr di]T]fKYVglPj:C޻;wQf O}@iC1t KWjij}͈SM,mtj`dT- |@<}8-Na~#^݂^wa?!A+A%?T7COPq3 W "%ǹhHT]iNdz;, OGiXt|OZ,!T#桡NJ=V20<1uYvc> stream x%?hQ].1=S$͐jiD`FLk0pBQdATp%`+[cU*S0,_>CclœP S 1@JŅF\r-|Ze* T_'x#0 ՘ZZٜN|qF#$ F傢ө"RHOj:w1 aTaѯj bSmIɐjQ'7RfS h$fbsHF"g쑣&o3/{Cp0B0{לQy/0 [zqGiv܇ޞ"s~q~qq6%Hmw5pB Vm%fff>z1Մ11t6 +NX jZuhKu0 Zy 5{``RVL6nGNk%֮}wm#|XwGMMendstream endobj 479 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1397 >> stream x}S{LSW>zi)&z{-D`ɬ`U^)f<$Cu["wYS3>ئ|-.ʹq.l/O>@ ,W9_c*H7^Fpn7ׂ:)\P %'CHּ |U9LcޜK20U1t5YlɦSVөi:15)'~:Fo0K2s HI`%HSւ@  R^- 0( }s B4)^>>{d]P+wrJܘU\J jQ^uؤ8OMkBdlc[5cU,.t˥9 !8ܽdTy*6>^u ._F!`"Ytu\n^BW4q7Vlpٟ6 ?HEkJM9 AMvnrN*?޼cuaz+yjo.5w c-7P Aݺ|^-vʖsh(v6z,"D xۖ#Cʒ0B{M Q_@g~4PQr8oNϽrQcjcըLh2qxfR HҴa P;?0moqPdlNl.LO}NP1OՍQk)]S#ֺV ڭփa#tð@jVVkJ?ɁJ1{5GxfM-զ]Iz}?rLdeoV $CmlywL/.W-}a$:y{5wvW]aQdSUaH[gIVn l4z2)rxm( x>.e_"+Օ;)SڪPyT΃5Şi-uPLyq6܈cR0K _>jVxMg(xȗ;#?boRCRwh'v`G@$3 ՈZ!.nSUMG @W]LcK p[ݢ|8ǭ1xZ."OP6• ?Mjt[#߈4٬;5*La$@ɴkjYvTA?7Dendstream endobj 480 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 367 >> stream xcd`ab`ddM,M) JM/I,If!CgVY~'ٴ-yyX|(\{ BF&ʢ#c]] iTध_^竧_TSHJHISOSIP v Vp LTL%YR,]wGVdY jZ~K%uvǬ?|[7oz5&O>G~wKtޕ~4p Yg!8uB7-dƵ[9\Ğ{IOoߴމ{,;cڄI}=< OIendstream endobj 481 0 obj << /Filter /FlateDecode /Length 4980 >> stream x[Yoȑ~_ -.dxm`s,<6#AJjUE 6 vGDF,Z] 0"󈌌㋃?R*]T.=]?wWN[xRʋE"VNjS^_a0 JKYza*+jQ.qJH] ))_\^i˪RWͥw+>]Jτ*6Wκ6+`SvۡXsŇK {m%81ЪX'KqBnwhɯ& hWB26d;Rz$_{"GW;Jk`2I^^E6 \v= o.?]U8liva.t 4"pJqcXVZjWxX$](pLx+vA|nO"yUQΚP(ޏ\JGJ۵)vP-j_b)n8վzq)(ի+J0DNDڪC8הFz\T~tY27uF]vK )Pimf }35F[psɳ+)h.Yɳk ]ni9~ZiY"q GhADLqdD00`&8؟n?,6_7w]h?>ަ&nX[ٸ eJDņ'?3Vqv"՟sIƠg Q($şϴ B;".y_"xk$ Šx_m>w׿*<#=U]a}wܮi mO<pAƴv7 n2iVm>+}:\2};sFMkn7ۡY2@klq}S5a5@E,voϺLQUp|柅Bܶ&;I=~ X%a0x2O\JWtgF^bJ.X.3(pJށw:o7A2$9 Pw(pqju g;@ɹٟ ]Fl,9*)#nr}NA'{'g>&X[Á`S3>srU3(&#RԱ.6s%?ؾtμPTfb!T"A~4\3 י 'yB_SNk8/DX*Tk= FD|5"J~}e*SuXish2itAXo˚n)29zfzN-2l a! 
9%G;d[K,6ÁqБt3Zx"w0 »d0hrc(GCrss> >eC$qdoպF5{A0g9mljk(VMl")q'ך\<&Xpu-ic /ᚩH;X!,> 2yq Ƈb3r [%)E!Q58N(3odlw]RUK*1T+xZ%C,Jag}`;/jSU䗾7A<)')Y&@8| )@UeqEc\9bDR:R hd lR6E};,FPZy&H[|8Ei2 y-j ay P D(sxAICh i%X"y ϛɪ2&Ǫ9Qؚt`ϵ,~g^3nVd7x@6=-( `|,y䙆[\$koQ 턿Kx(ȴyEMMMx &X f؁-Vk(n {x6ζ= #^_"ͪ?_,Z-b1"m ч cJ>񪯭=kJ|TQ|R ]pCW`isUȖަgLPc$ZtPkmKG=!-G6WxzvŠ e72W6'p@l$Sm ͩ@z}0 e3WE;SL߁Dk[}ƅ]X]ZAph> D99CE4Xǣ1 yR3 a$ܜnny 5 ҙbr$dӤWg%{|0il!NH(v:;Xv[^* J 6r ƺhzV!4ct ҥ:R@Jz2BdnX=%4{9 BbE)._%YDZLy%-o!1cA ,{ %fWz-pM^XBDSȓź뒎`̐ͣy#kpVs#5]"ՋfFTCE5S#;HY=nd@X;4c2^QŠ F| 3rWѿ,#zrblC=,d.s)OUs3d %gxTߏCds9XO`]7;]vv=u vTعz,]q&x[ݤc(yGX=F#wi҈)wcS^)J!;ŗ0ҨjRr7֕Xi2P5%ST72^QE]hI.~9FP0o;De7:Ɠ?}-܋eE5?}Ծ}m$}vk_kekw6_ęH?pCs` ND <í)6>=_MsLw7s\{czzŬc_ׁq~ij)~ݎ]MJܾ=l!~L8_l1D!NÛiMs~|Ȣ_-/p">XKۦ#E>rxh(s+: ur; Wn.6hP-`](B|E:66iɲ1]s K\ p[@iW'۾;+WDodx;lvs| IaKB|߰ڦKW66JMEGWjlU #1%~jEMu:`(zN>7aWC]+#03= 5Eu}tQ2HVq?Zʔ=6=(P!uk$v?)Pah Ӝ6*zj MTB㶽빎K␬bM"}R@Iuۃ HNr}/3endstream endobj 482 0 obj << /Filter /FlateDecode /Length 5765 >> stream x\IGv 8Jg54c[Gj`>dwSY rȬ"9MH+3c}-fM-f [_5wW?w7WW7WE̤6z挫2ի?e[7ʫ;^7ucV?<Уm]?zƻzh-tiH:[[eaU7J뛟 ,RmZ\ts荬iSQV{roixano0u#Ew-LyW7nV 衫ͥMZzޮV[M%ԠvD%XZ$@uH&ǽIܜnw}F>vZ]p>műH۰/뽫aK~\n-,8k80΀ [\nUZ  gMYD$A{W-qn%o.W[ /3rVeXg> "_Ls`x?_"1y|P Fպ3ۺ: _#,N &uLJyHeEO݇F^WGe]A PJȟ~k4IZc @[ _,Sڄ:ÄRKg+@ J%᫵:N3"G= Ƌ)Zu>ȟTSK-N9}XFF_c6K4$t .ܯ ӄK5JVO#S "Oj80 SlRhTpE {`d' 9ǰ& acP 'pw#J_4˪>cm3`ڇ!q(Ň02얘Kd?Hk+HOUal>LB3XL!/.-Uy\ [&}[Kюif`#ׯE"% &5N-#o/8pNVѻvIbZ:058Uh6w|5%GDi=9wd[?F"zPe85*RM t\Zۈ=RBujN0Qd$hMT?]n 7r}7f>%hI5Ƒ o 88p n3;;=DMqu;(cQ!@R2yN͸xP*M)S!N/jUA{޻L4\>HN"IޕyPՠ/G'BZRt\{&D## K>vC`ߨRP1H?gmՑ ,U*}$IQώ x`*Tc &8H]\#Q(vwXNO;no/hA,dB'x+&^WUG`HFPgxh Z|{J HqL7sd')Cp4wi@rY%é{-c4̖)1kpԖ]2\sPt{-D [0R f΂O{Ѥ@r`uB 0H փ80гWC2#h.,1 F *| UϡӢI&%ʏ8쀮cTStAu#` ZI[՜ZKRzA O)ܚ#2q&هՇCR Ve}{ 8xy<HI8~ե4=w.<5yӭ9pك>. V0"`o#t^cg6}Q!U@<[:I) ol CF<"2(jT#<4{<)mt}xٜ(uNd"D^q1N->a4"D3"KMHu a0`>j#f8CyW!HOɂqzl Oc;x X?On"ɫL\K&A_ģ/PNQVq3RS\S"'/u >8YLJARC J34X< >y莃ɳ0n4Y4C:*s%XMA\~؇\38uM^fC419X)e) (AQ9Kt꟞_\؂X"_mx2_KXko̳Jl:'XI4 8o7ťbĬ FC%Շ +f4- qC9m`a._@ ݾ~2k xq6VT7 0+ Fx͔hnc:/(.ד>.&{K"Ѧt8~\ 6kSiXME *s8rK*7z3rhp,Kc.CȂMqH} 0( ޢ+~Џu+d[:+JjSb$؇ F?N9gڨ!J3Tڹ!¸ I8Xzfkt^L'0s'ۘ'HhN'}v.OG|[OƓW3a8.yXas?M ~PΑ06Ջcnsx12DK!ىU'Td'"TEV;Wݦ#4)) 47JJ!M(BKYI@* 3y#P4T5||7ԨPZ.0֊*3MI=/ K7Hd&E)+moJ. >aLibu-,m%7+*Dm)7[e`;,hl,iI R]GDd7}/3]"?Et9Dnbosع4HEΊ%5aB. bY]g"8ԩB a?#֛JոpT @eԔk/tmb/VY%j3v>CTX=Dv0g`=TN9<Ew~.ns_Mj(S;i?~4Xih?MP*py#O =/#_[T[ҩ2 ِE q=%èΑFz0Q Tr[":/\;-0̰4!䑌mهGUS5fC"?2蘩Oq% J/o2Q35G3" T)<9X!s>PFL8{Vq4 / `I eZؓeT"Bīڽ=-][=K|C%YgQ:m-lYO˹(re"W8)DC,jHj2TtpRDIV--;wsX>U"jr%S~eo2Y )YܦKgqk`qT̀ |4S+)kӝF;ƍT'Qݾ1:'Oc轵E,;}y4hՌaaj9Ҷu:)CaU\?*jc+ "2d5 ~>)>}0\I u mTkqP.WZm!Y?(o~[>+Κz EyJlpcf qnCpYPO-Wa#VzӚ`1\f&M'N%좖6EXO=~9mU$~&83 LT,hjOWiN"A)FluݢLTR^A$ 裀5XwTXC AaX0GCmK" +HG'-;-X&2BЏ*#&(Q:as9Aendstream endobj 483 0 obj << /Filter /FlateDecode /Length 6275 >> stream x\[sFv~_]ŷ)A߁ʓl޲ʻ6PCX hyȹt ̐XzsΥiU N?N==۝pqo6 MՈӋ'EJmJe3l9؝-z&R*rji]ݍ]?;WJA_[Q>j{uv?jQ[xiH:[ZeaUBk-Nϕ)kk\JkpLiTY麸~'l<^nB`QixDt *Yce?Y6,|Dۦq&< ]k`x?t<.1]Bᕭa Q7.I#Dv|aZx>VZ+akvZVSt{k1i7|<3n=5jUtwn{%Jyzſ-vp8w9oQcrŶRܵʚ'hʈ=] +J7~" q+?a#;xָQ)kHF򫵙}J>ch,ukTU;`«dZrE;"pt7I j$28H`n^n:>_c /\UxyA~NH\&lްP.9v VLG?>R7BM]YtgڀBinڱ*czZ%^ E?MM9_X],E%o߅ UNޡ8iaEI܁u^悋/-C?6,'q6& ܶCLK$Lnp2iU#L&In*hݾ׆-@KMmO#q)&Z\Q8 ̠qcZ"IZD鵈aFiR&o7;H!s/R7d\uv KFT,a5ư^g@I~W$8S)[]CS`tQڑxQ+)]{ R!J /5UQ. `P囕 e]9,LSj?hԺa0! 
&($D7(ޑSBggcc!+j wgJy?x h^RF ۶Gtё'63VB|`:vrӊAۮe3|׉lTc`n=g)Zl% D$ Ġ=YQ-J 9(%yJ2RpI fw6lݟpě߰yӁh$Q%Qʷ_Wsp-?nR̽_Qs^ NBnTH1z2pɏw}3cO}t&<Ϗ"%0:1$hTKjM4d2A}vvXn{J\{r6Q{dlZN  =Mة77tKo /:7hȒ: G?li~5q84F)Ms역 uCHNֱ E,ynSH< nqM|;GI"tqgHcbȷ˷ UvwW-ZVO';92C7>5VNpk[jS%*]#m.Wh*z\5xȊ~~W_ a&g*w"YWUynv]Yo1BGR@@N~n եBO"1+3)4l%}* CM17ala!r]2>^,x\ zU5pPxYa=YJ1vvX3uF xR9ܕ0ǬX֨3@{Ǡ!Ϋ[p{dZi4aiJ  KJuE!> [ KV c{IQ)NlaWس@0;58 "=5h4l&X'8\7ջ>$WȲn< ՠM&6M&dމWS[[SZ(29: $_EO iw~ ܈t7E17 5u#RI88O|f4FΊN~$ +PX~NE4hnӶ[Ñ3TSPyG;ͪAv9vl;Sޒ?IB1&c, MQmkN,sc7BC 3B>1ԩb%( CjJfK+IȖdKL0x}5UReiO -XnQߤIvl!,raM&nW%͑ xOn+K p\"zao}6a(fyԪ#Gb[q}2]=CJw }~| WBmO!N }]G~X0O+Y;1mg`Kͱ3琿ք2(?z}s!Ң:W,a;: ]= WudSq{VK,BAk4 ѕ";7yݧnmC yI?쐼+1!x JڃU.*nVN tCwTLgI?1'%m?Sؑ[&2:ީDH*bCg{?lh*~?'%&-gX>R7S)7g9Q'UZ@i?s#R͡cͺJ=mTO+Gd-ey"UTͶwf4wD5ռDCG=/RR\V uwVq$>)UQ M L5Zh/璙m(p>/vKE(-z n~IhX99<3~՚JA<׶=-1WĨu5ҺBJ 1[(@4Yܤ)!K RE ϻwR;i`i!٨:)+X[ h` ) v%'`2#81<%]!R>4D.>,Gőu|p8EI6@#?wqustI=.Rx@>J,9;cdnKhF Jjv56~c+DU Y[Ғj^AgfPJavl8)HZfh֚pm(I_J> :5G&5().&, |MUisIҘyS, S~JEPaਫ਼^p ,&O8yeb[*u92ZwCx8DԊC7><A&#-]~t-:1]ЅW~UFx*3Q?T®6ncI4_Mhcy)>2MZʱ/괰tTIU ؅U;L@ދpn^i!znS7'p9TU/pt] $穤Zþ-8aJ+σ_UbaSRlCGl7MzC;_#)K69cXȀoҰķ<5ٲJQS9Vx#ώ:97{3"J%1 + !1oK/Ljt|9$MPDZVL8qd !è̒ӛ1OPyM\й=*u9:adZZEEג F! I8SlqW2&mE4l7ByLlj&v&)OatC5c~_g\JvSبʹkpxF@G5PW7qtd|5v*ĝ,jŖXʲ7~(w=Y=fȐRNey=58Q SP ݎXѵA{2rrx֎SoS tXp.X-WvX\›O] ׄw"-W鍿9#פ*Hf \5+% #E kA j_5K7FcA% ^o25L@pia1L<;w-%X>oއ>A̽@#P"$>ROMlRsks(Ue1*}Bc!rx~3y2 YK9#o\='b/|k1OO3aZzl9ٚ~_D`ŽÌf䭌=VEafsD21\&k?nۤv9Nho Tv|߸7mJa= fuɑԒ N^S+f ^Olɻǜ!x+a%Ռ` ^ _.p׀>[h3Ў䊼_勁r%c3P|%1T=Hl\҇۹—[٬He9b9rb'?1~23JlCc[g8AݬJDbO}|5z?!G|#ZTew}~'r2 yJjT4E3?,_׼pd+?g_~YJWTLWy+Gu|n7k`dWʭ\ik.6QO*"[.@Ը)_*kb  +>AhT-HM@fDE i(gbr}5xC`L!|3ɕ*Zb֮]:Ƙ#e-+)O0/o.9f{?x'ϡoYU-al8>zTy|%a=+ʀ%zD'\1+czq;mV<3JmCA+V|P(5S2#jL-xתz1=cG60& ,W t+"8`RGgj_ֽ͘/aLY+vzEVAew>=!?n7tG&y(k>=?}ec Jaj.\lRNM׸.>\#}+24||/@P|y=vg:={> stream x[ݏFr. 44 : ܙ]ʺ=&Hւ@;$Wխ_nRV/]nǛ_nVۯn+xSʋۻ0EJmJe3n~*_ȺWEsYTee/OhԞ/J)k'|y,PpmΑtWwBe>h]*S⸟]vmNͱ{X,4UQƄ$RZK_S%'AO:SpD!KJoo9hZ.n6p8횠"#NkF'.Thc %9>Tی[+.7OQ+s롰W+dVsnXSIJ >&NM#6+ P]AAREQQqޑK0[{>w!APHhQ'.6qȠ[[tWo_. ׮_x<%3&["iB ~A\H6WVќa3f2M`]|AF (iY47*&\,m! TVo= L3AmI7ePu D _ nǛ㞿;/ f(bsZ9Z$Oynɠϑi#,`7f#HD9^ugylJ$ccbsh$X>ìg2 ! 
',4>M,txB`1"ɐ=)"e+i%CviM@>YIwdO蓵,{nD/ .&A PR8Êd'W?@V5("y6"c*WtYykЬ7 f ƆR2&s!כDtpYVy#ƍ#c}Fy)YAeXWlb2JYJ d'1/Q@ Q:$Ac4٨cU)j쌺?56\SZ>ZR2NQ#x% %pQ:SCbE Ap⑙3Q -  'oQ JRυe&ЄM3IP;7G!tgr'Sց%B%V1CM>2 0ŬeJLu_Ψ[rm h!5cؑ5fME 'e,[dQgEѧK]beg hí4f \BAEQx!Vu4;%U9ݩgQ2۠ 33233Ñ1lv}Y axԺ&cf2O s<*,5kFʺZ^KC[!sզu߰H~) B/MUDMr0OZ8*I8E ZeLVnPZK2Kf\Rb1K{ 8H ;b~xxh"N ǛIV*[gF" ssHD5&Zni\ȺܣzHS LCE8N VXBGt e@ЄjiQS7">ҪK*)ܸY$uEZb(HN͵4L;0rsa+Msu9^_YeьZ51ϼ]Z+YM$q$@ `oY|hy_%Z5V&X mM"JyM2ih!ھ9f]ߚi_fAY!ZiӞ#~b|<]RsąD -S`9"$N2 !)_^uZ>Nmc)[Cy1ݨ{1Pypvj{Q?o!>뫊"xcv PV7=׵!Tjpe2iVI#yb} >7ᆂ=]n&`kD[i4.&]Km?rϸT6XINx)3rxFSIJ|K{AH|EJq*CW=ȖXMDlF:9'3!>{?DRךiXK5}L!{/Fے͉F0i:Q*a?Ƃl(~H©5JY*~1^#l49Cn~!SC E >?t d1:fWF1!0Ex.k3.ShIWzt {|ng(ׇaEy>?jb~ <^1KdbKY_eb&vW9FV_q0ƈw‡*$@YW7)Ef+ Sth o>/f95b+1Z~eHBT[sUyH}ben?!äG]izhgi.joLS``sU0ђBciϗ%-PaOiz+*@p,H00l~S0RK'.z_7d =>d 0g*ˀ,]UB@dKE};2 z LӢ.Xۙxe@8]2aM`?o:O /\}K 2\M><ԍ 6)]ˏdz1i;Bq+f$h>Z<o'V!|L,t'A~JEBgZKUπ_Xc9&$;#ƜMYG3V5ԢRXMUXئ$8<s縭1؆A&`͹ ,C_r).EmFp{mlhKu?/.V(ζ%(C+}mltO2T _eH:ukHub'#7C ޼p-%tmpgq|CG5 Ց#C(GXN"+ v!`Do^ElKZIO.0ASkqK uo3/'K='3 + Pc&mH3of?pQiQ[Ǜ\ WP!RM{mc|#s8N_Rkb`<+t{jvkN()])E)\/JA/=ア Ͷv0,W P-uZ=,Ekm: e:D t=;鞉p0vP҅P2rI0ӇF"tAf/i W-A\IQ.p׮$`oJ jK3SO2_8(endstream endobj 485 0 obj << /Filter /FlateDecode /Length 278969 >> stream xKmMiV5Y+/-$e/ğAF1""|V .š1wu?_Ϳy"?/W_=_O?}{~>o|f~?w?/_W}^h\>?]zϵ>_m?O?/}~1?u6_z]>o_~?ǟ"Xx+\뗝|; 2>$sNî|LjAE֒ŏ?B{ úxyH/2_Aa8L\ ]+׏?!y@NtN1q0I֪@v\;Zgǵ.k'x=lߝxõ/^g- ZQo'V˛֒:Iy֒:l^A׊ڝJ6yJ/ҸoGH^_ `ZP}-+vDzHK.Dpö$VNJ9 m.Y7 G~Wǂ־?YW1{WoS վ>Uwr1pzE$u7F/0u ¤urk9Mb/4p!&Y$y'Zxt=N{\Fk#BLOktr`8Y ^g-XzpkQ| ۝t.@c- I=+".'wtg%jGw57gc/o\_DzH#[\ 5X_p~} ?&!zzIcX7netumY عи-ݲ֕l - L}W̧򺞶E^Ls-B:tgLK\ [_mr^]W둷;Vmsq'C_zT'J: Z,2f`%VDyHܞ;kz5niky]AH ZqTe ,\u /'Z+Z]E #9}kUfzwg=ogrr8?Fż\̶u8ɋWu}ÕǶ$'g^5']d) EEn{V\kQHZWoǐ{=+p7?q-^p≃v'-yN/lNCb%fPZVXdrȗ " ֽ/y;Ip-O hoܖe7`W߹+?7Vͅ^&F/TiČZW 2XȗEbi;"8?xkibF-A6AXֈ˃/ UEmYݫRV"͐mkyCBYbuȺ8_L=LU^ZB|u#Q۵L( ,2Q W/CK~뭡6us'[Bmj0?(Cܝ׫ۑrW^!/עu`q!.Tj}-h^Z{KZ!.V"[_KKת)yQZqmQ+UHKu=0.Vj"/Sv{Y8W+|qgm/ HoG,2,Bcۉ#/|/t0.nm&:@uJETceqjȅP vu#ke}H^g7c[@q1y\E\ƺJ^Z7_w |u+gha\+=.l#EtKSb=aLC FK~e͵~XB@kfE΂W6~-e_61؏XdFK""hsbeEZ*xxm7b\߹K!@k"Otp U—ZѨݱ6:6;h /A.qm%8mx^keH\ke"6^ke$U?l@bý_qu5еohs\b)e m 5YyI֚rfmVB:B;;N^1X 7B""| r$߆s/I9D!9D\=+Ax[x'=>@qp[x'$x X_Fm ! yC ?{E!l'y?! ! yCĥx> {E!WuzkAde9zyDD<#(FAXcMg'/J#c=d$쏵QY Er,5Gv[Wِ@&%ξr=D&u㭕Rg7~>k؃!+"(db˸b퓃H"cc;d\$yw4E\ ?og$Y畤<"zhaG-0Ab Ju*ֲ %њ$$\DxAb?HҎ}%$ڟ{aI}7Iߟؼp{O Hd II$Q$2$ZDfDOeHA"JL+HdcI%QV'_~FfIe&Q&$$X2 qeA"N,<2 'QD<6$@ǎ<KyETRAU<"mIt+B>GP)44ZOTDBx8Nt!8'a_<"JIT^(%B>ǾJFm&T )`DTTR@;N!%Q~Qn*d[FUH?҉(r[FڒW8҉(%Q鯐vE G:u Q,a5BND4꫅#:m!H'ޛD5BƑNDi9υ#6]8҉('QɼvQz/dD_H?b#S!#F@ iBёIM!'S8"%Q~lѰ*d_9~DlGdYWz? #`p#'؄C G Uy;+~lndA&q%h$odѕgq}723[腰˾vod[@~ldAAB%2l%@5{>Fxly&"ǦAI%l -!L%l 9{B!aO%ld DD@hDžJ{B!S%Fmq!FPHU \{B!]FPH_V5h{B!-[%лmۆds{B!]%mAFT VUFPH'8VFQHMk(ʬ͍#4-*э}t#c5%Z֍#4(걁GJ6ЎdFƱq¥Lli62|C9`pg#'8y$FG>B8شv62|sV`k# tmdg*FOpm#'8Vf G>sI4_WH;b# Glļ_ҎM#F G>#I4XH?b#! G>SI4yYH?,d E&8r͘8MBڑPpk!㈍MABue9ʄ2-e+CYW>BM/y.]~+;EA$vRߙ:5`6kdl|fF&={9ykˮܛCx9(KwfsپNF y:z # @t1ro< yǃ H,z͢|t~3B#+x 48%ܾFv4; Kix5؎x4tH6Hp6 aEc=Ct7rm#'PtZ`a5,j/犢a7K#4 b[Mz.6*5F$| "P2Ew4A ʴ2XT2«R\eиg(F(W%6%l͡{Z&o^Ɔkc~g&[l&\l4An \E| OX&;aPwqx>u)ôNz}9ޛVhꈎu/l2Y\& 9pYM=i]|@`[\q/KY%GCΪ,!+4Lg^0&4w@B1͚=Y);4I1lS%\0Oc>|GTKgRN L|:AQS)-)q"s9y[Kt3$*mͼ^WI3oN44֡gf@fIcj/X|+2LK4/Xi`N:'#vfL-J/eDv0ť/̫' <)Ui7p&ޥhҵJQ/$|a8]47%J-ǕLc^!]<.muEJ E} ihmiZ\םBM}O5nFOP*O9[V盖<*pByׇg_M<;n HK[m MFێ(xcʸm(Q>Q1>WMm6/EuH>Kn;| H R~6JKoċOMG)۟vxוߒ~=d8"`;X"EXKqŃi!`x0<빘T3J#0cTnXXN,8b<^8cxe0c-}8cG LkHƒW?cg\^so";>kȎXҘ9Obrt$Ԙ bj찉YYq -;~H0e']Rbv` fP

ekW5礮#h7j1u/9UGᆥbu8uטRlL2G6 ^s"/MM0uTcB*մݛ M[u6#77oy:&Ͻt@iwNm sq GO#CCjht_}GoG%IF h5@4ŒhXWx#߃;@chyB`!za}0j^9T~`BTˇD!46cTB#hNE%/ZzeQMaQ :&#ZaR C* 2)$ $29F(?'~*L o)`2(d+B4S h Q89~Mo -Nd8f>w6S$_X ޚ ?@||u")R$HrDƒ{/!DRL$Q$u3E2XU$z 0'Iھp$$$Zh! ]-AbL27H,I'B$6$툍ThI)HQ<#6rL26 &vn  $J2D""g(I rBZ(H2Cy$ qH{#0"o,e!?(G- ]IRZ2O͘_I$< BQ@?+#(ܓDgBz(AU<"jIt+'B>GPɱ8@ !DgBxN3s!8{'_<"ITR(UB>ǾEm&T)`DTQR@;2C* *jC#S!2VDBNDQ2DBƑND!/}#,hX8҉(>d!s2t"ʥITR-Df G:%$*2t"ITr.u%H'T/eJڑN^< ai~ĆZc"*G>~F!yTҎCJj6M%'dFTI?5*G>.W!UҏPGyD:sIԽd||BJڑOXCmG]`Aj!lVҏ}CJj ®q%'}.JZIdg:u̓^I; u+ơ&!Tҏ|BrJ%BB%'$(2JZT0|z訤y!cӐ¤P*{BjJi**g*{B JP}*{BB,q JƞPҩ*{\'*JP '!'K"Y%cO(BVm}\%cO(BmWy#6$d!}`!V"=+{FJ=f!TjV2А3D}qi%c5SD:B-d$"%=H)pɅ {F$R&"΅=4$N@!u[pyDE(H]HST2z) {J$Ҏq}!:.iH_HS(d+HуBRC!:b"HK". 9Q҉I1R8gIBOL!'b't G>>cψ$-*dD(1B1U8JѫB1U<KaBOY!'bx- G>rI4LWH;b# Glp_ ҎM# G>Ibf1I?b#G# 1fO\k&GBSAb248r͜0 SIڑP4kqF$:r6֧u(/B; i2pԠ7+N0Ht  $WYlNLX^4"xeV&4-;L6Bu>i+Y@vQ,YG\ ,iiz:~8@ϴ,.: \Oh MWjJm~9`V4v"-E ~6=dM!°M7gM⭼EjtbKp1˶>&ms@Q lJ8 OK>?|Q"G,q| fcCX >o>a'+wHx(5 }>!2BA*)EnM4FJ )8Pi7|Be> J}b&\zO{9 /Gye`­=ReuLRe࡭aaI(3İhF^0jVAl$m—>"%3l6:qyI`H\2<+Nm>:D5?#4*0GA#7B#c1B ^ZP7.>TDkDv y%4˜4-҂ZrQ$.:Z<rUگh#RO%)c z#  lAưD<4́P9ڢA ~ca Q/> mş/s d"e[lKlnζ’Hh[f; Dӟ!՚ߋk3 鿄޾W,bpN0Nxn9ԫ\8}Z ȿr7rox)_uܱ%Mf'ƏŐ; җC6X p`.; <}́Oqp9i tؙDh^'W4r Ve!zU?N1aы2WuSu1^,)r4ďQN'2"FBVplK^X<9Z&0>Z&TЂƇXQ7JS,):(m 5i[uxTG,yjI:N1*ži5edtqZ N4gAXSY4d4 c\u5%kMj OLs{.@Z£Z,k} o0h7.|s#ȏ=!\Kxd'I O8!XPNl&+ɮ!jt^l_'k>b#7[ʆb#.,*E6eQIxfFUc#=&TW 9"p!%)H>"d[4Dra!' 9 %b$$\xD x-Ab#HҎ %$ڗ=0$lIņ[zA"3H!'3$$i&2IFhyD>!)I Id_IfhD!Ob>^~FfIU&Q$$X2 pJFdj=IەkyHSBx(M'BqICQ!}᪐yEҒ W{|3c!mٳ DBzF!l{TҎCJjVM%'8 *A{i#:ئ< *AKl# 62`62~A>G>FڑO󸑱ڗ,zxst X+Av#7SJ"9ɔAQIƑOdUJҎ!$'r,H̋%G>sgIOZqKҏ|"g<]vF%Glt_LҎM#' #ȁ 1ÏIOeLҏ'.6#ȩ 1dfN)$H(r58b#bl~l9dfo~F 'GlqPN22  EU'юsNo'h{B<As~lu`]dQQIlH?:a/rQQI(ȟXiâ?C2o*a=ß< +/?&N??-+oc|N ?m6ӮoXza5ȟ!Z ?J82BRlSD}EdEuq!)N/"툊#2^:IS t N%ů$Ű  ,tx6B:<nJg4]&`ͲFUZ02V,?Һ@% #s31KRih4rHrRȃwgYCߜuU1]PX1@Vy wX '&.?ULi|UzwVЗPʤU`⋪Rf]ePMC@ٓz*gҎ/fPA "Fh72ˈ}^(4΄1^ sj8tаm(P]1Ϛnjoym$wKbWoE+Ͱ6m,ƪ_= kܾ3ޟM3\Შg蛲.o#Ơvߑn~OG4LaϾWV(7rkȞD/?u//7FƇCjVɌ=cЀ1im5ilƞF 6jؚ 5LF#5l백d3Ѱ4l\d^Ma"3a3l7fEfI5l; s&Cm3}@2Wh?h{CҬ*j]ڛФ-P!ҤmRzhVI[u6V5dGi;i_d4M7'Q bEDwE4M&_aD#a˰m!"ƇXp-?79hldC'1mWz#X)¬-,`'w 0GKߗ$#<$S0B/JLGr#"baB-)+,V#'X60[,0&t0Gj#XliCUyrIy24#{S2^yIc2F KOcA)r<vB1r@d( Xh`Vz##s$-,O">/SFM"] JW4Z.{Sewx,r (/C z djud R{ y.2񌇬JzgG&2`]qa\me -䂶Nz@R8O@v8bZ!ʐ/ K8b,W*H[_:͸Pr\@J9&ZyR1badsLC9 HrvĆDumO^JUEutYxHj:Yd2jֶVd<=HSLGJy@kg̛2PZrJj`K`Bh_} -k ǖ8'K\/CPl)tЖM@+|\]g dvFqϩRoK<"wȭ:'%axBɺtRg|)I- 7޲2P[cl&27;О~+`{v / ;׼BeSI; W8`K<1 3v6{I07a ;aaء T0j2nfNs~QY@9/hѻy9hʇ2/+S+'F% _^Ok G?`sb YaJF0XkZ 6nGW#pm3nIGGEt+#ZQ <7a[Q=#:R˨BJGR=K* 7>)(CuKnR\*6C8T2_Upu} 5H$e]$'A%ѽ ?b$(HZEcHt}x6Ig,HL!lTҎ|B-JƑOsTKcPyvŽX% u*GdCD]JO?'1=4ؼ,`d, Fl%7ЭdAp!lW2|BMBjf†w%8өmDJڑOA_<65 ~T26*G>!D!JTРࢂm@]G%3dt&PRIдT2M#1P?SIT2"UI4UH;" G>aI43VH?=+dD%ј[!'b\.F iGld^!㈍K)Bڱi,a!'b&1 Glc!'b2f- 3#$-df&$j!H(bqFL& :b y1$&~ Glp!󈍜@SIƱֵ\&#$:|mO(Z?~GTy5Oҏ#g֓#*r=H'GBcI#*r\?HL'GBAξsIƱw|Ͼs;Hҏ"}#*!Hx4$GB^Iw|#CϞPoEvdEh9;ŏ#Hxv$Ǯ9;#SI?B>2#L!d|d L% T2B>2|#*-%}d*GF! #S}dGRb#3ۗaՅ)>LT+\ FNmf%xV(݈GF<+j*6Oh%x7Oz%X 6+F|U)+F|u0wY6``mXE7FTu5ވؕ`U߈y#*o]DGTpG+]o#3s#.!•`vs o`Y 2syKvS@Y$2؃d z9AaOdU#wT.Ε ̞z; Sfa ʸrͺf)C??I[/c -k0~zlh: ̱8A^ShNKc9VFNOgtFhqZazliɓSl`\4k4w%ŖVLodaK+SҷF{ZKޟ{>$;7WF^/\Tf duqB9Rf_'Kt`'ȁecK&Уt7 57NJ}";r>1+e#rn:,G^g'X R F6rN%hmr('J%p`)ߋ.-ɥ\ leh5\g:T.ni6x-nN ddXg#l`3?y̯˗%UgP3nZ~h?ca^=*ȇS?~>|pl"Xgh) J ֳZbU 0k@,B.|,h? -G(q @_TWh=>ܨgmPB@9@ZHP@)@dA( D.@Ved/J " %*3UHfpDJ`Z#Bq>F?oAO#F8Y&A N:%msUp8%h!bA1)'r:&ݗ8&= zGCx8i?  
>Bܶm]Eylz5?I"QoHTI}]Ş澲bE4Un%`*%`x _ Wm*O&`몃0}],0}]e,pcUdN2tmU.`J X5/w_|?_@7vP@Wx" WG#}cW뤀oj$`0]l){TsjoPw7vu hƮ~]c~aby]]l\]~DM>kcխMns iHj?]=澾^o / ("(m7:6v g* h{J'Dc_%H@]F}CQЈ$!F)`n;{(ZPRv$0=T> (*ow>DmgMRʖ 涳*TVmgVj hƶ,gKVDp (+o;{ .^LM_}CXm/L@ bc2ֱ,`li].PY@oK&Jm!jM@kcC=ڶL|*}gH5t$$.`]rT,з=tK=L;K^WxPx^@0QP&_@0^Rз=f _1{P~u_p(?kJ/LHqHGs5#}cJs_c%F]}cI9 ƾ|O%hmAsc)&}cscz*}c1J` ~c,}mcc.ǰ]o1@3} d`1_@# ƞ c:&h ~sR F3}sOrL 'IF),aN|m"X& h*[˘qku@,F0[7#վadn[n>m2Rɵs͡wԭqnF[d#׶m;yp.L:*?̷fޡ50B!F0GT #?TNdJZP"Vu .6ޗ6Ð@a~hdl)<2 안` S0m(mΌךbURlF.̛ܛIK%b䡏 R_^?e yhCk F0ӄ(rX$Fhc2dc+FZemv2X:#g18֡ȻнƛulxJ7fK \#b!<6#0*q/?Nmn-ֶŨ-\t_ߞ,AܪЦܑTc0+2u'^6bx.a%?K"mjjb /#/XP{ix_ ѧ7)Z{a;eu;!b+>vLLYl]LkŮ~,޶1Dױϲ:??aȦ"xvL7+eҭ-[k#v\4LWa֛ٝ xynL}YmqhCJ4 MtŎ{ȖԳٝil|XnW(-چ 25a է5M[ajb$ɐF+)KFł *]$hoު:̰I+WV{+v+a_ޝ=}paeTNB]ue%R{/#OPI|#.bA #8Iv!aE)^0W#C[#8I#b}g(#1Z!Cjҫ瀊{:y(z1Ж.2tƞrCRJ@M~Itltm䯄>chla꿊ZȲdgKPd]\ 9Ϡ EI&E([%[2+UOLSe+1MJ%*lƴW2"᲍C3lh!X{GXx 9YI8%g ŚNHl;koeޛ?KЙ&e dv6lzۥQ |vf&}Qe n2Bb@ZC;1ޛ?*P턎OH%%EKZDO);H)G7nPMw_bOڱ5T)$*8}m;f`HHS-qh% e^ %Ѵ>T4(Rq<Z>hɮe|wzf@𷙻^&͐7ÝA^XN=$BlZPlR VݗfNtkڏV^[q.4fQ|JBx۹Chx2v4K˩v\};K>RDZ7v4 yЎz&.Z%ҷ;>_R( 4Ji` X4S`WLcV[q0f@eT Fdkª*7ZyYziFStXE BcFj2*S8׳6y|BMX+d1?⨋ri#XX50c*?2gjͨJdāw|Wy8ʘ*&=k8HddnV-FGq$[px+0d_#MFá(j*jAMZ `)\ gleL^,xZ L-/cP8mHbw̬yA'k3uGoQE(V1Ӌ E SFifuV!b01yNTs##!Ĝ,2~-ei`XA4ghd|iaOub;PUO*FJShjb5zq1_F0FmӸcu3܌@c2F3J.vpY]u.aoK\ -}deJݎBJD=& {ċ +˚ͯN` J``k| @%p0rm6-гBJ`P  ł8Ѕ5 C%{(zSB-qD6)iBJ`Q;(cJQA*Gld$#J>$ p %~)J#2",$1H /HPH^'r-Ez=!)M$=Hq#־7????6~uZc=}X?uZ$r\Iʞ Ҏب{H?"Gh+a!3 ᾚD{o!}޴ I'\I´%2B(rBg%Q.VB~F䏅0,yh媅c7B;'Q~]sB'Q*_$<#CQ% tn)dA$:#sT8j2#[ ѯq,$:j!<&ѩ\<"NItR/$dA$*.D!cPg0l" *IXs)OYn ayLTH?҉(72-#VIT*D -#*mIT+dDTWH;"*#JdVҏg!H'zDBNDyQMp!H'DBQ.dDēh^H;҉(2t"IT/ B@BOD# c^J!㈌$Qߦv)dDTH?ȈV!#P~Dl#2ԯK^%U9 +iG>6d%c 2 YB @y)[I? 5w+GPI\ɕ#PC6+Ac+ǙN=$WҎ|BJq_#Ơy PPI? " v=j<*iG)H%4$8)JPo)2JP'!)ʟJƞP*i{\Z'!z*¨Jd E ' d Eh˒H~VB[%m6B2WBy^% *GlH.X=eb%sO(o,JQk(BYe#4L"h%mo6+BRDBu|͊'T/fE(H_p Hםm  qu4?(oEx(2Y^ IPH}(dn{ǹ}K0~l8ր"<, i[^-O${99ȿ^qNB6+sEa]D&-$R)m"Y [D.IRHfE2"hDұ& =zMCRrӭRmP>="A+88B|<,c^ S]%NpΓJ0@2ߜ@9H![m' _#NZrRͯr;:B,w5]J:82 wY,=Nzy—FA HNfPsrE]}؜@H͢[dP@M 319mo_lNYR'sힱ˖dٶlMۿDBy6'<3pb4A{F,Nh[ڥ]g?9G9Ü/ 8=p<…D,`qq, e٭ 6׏8G4 \ճ#a;` Vɻ%p0nwI[ hpȻZtKaq#2⑥[o(xH>N^[C1#/Q[~ <}$pwόcG(pѰ[6򡋏G̏Lrqn0}eӽ=Ӻ+kr3S~d]_d #Wi[5HvkSDbF=ݚ[饃d=Zb]zs/5J~vk`[NLvEv<݊5vkٽmOv sC݊Bv,!ܟS/q26R&uc?n=Ƚ|dpZ3 Y%»ӭ ݪcմ[4v?͖ 2ӽB}I|.iԗ>=⛢@խJ԰ d`#/.$Ka.,9ĒZ~y0.m],uàz7}$|~Ptۀnbΰ,,J~P.;VO,:yEnUjLORغ/Ȗb u]9pUvrVpADjz(hex5o\)|dƆu'ZT:OZ}ez'9u7~ ``#(=Pn[9e~ce/4AO/,/ĜBkfrU(c|x}"t({I]/7%.@IHubJlm/-2)`hFMBO_# -;%խǷ-lÇ A#{ی>r2`+ҷɾ2BGnN?4Knk; oZG'floCk<}폈ЫgOCQzOZCcv0`lsƠϋ1 [?B/)VDh!^]~Xy}b[;ުM3O3ayVW?фțayV'}~ء7Ϟ>+?z|?"9Rp=~^ z c %In-ޠُti/'UI{ࡑR{nk>1:=peеY H ~Ot{vcVω H,f7:/+tgF]t 7Gs}W(ћ=NYا) Fߢ4N:d,[M{6LI72Pn2:8մɉׁf|UdO '2q}H8>[A~Exr!iXPv{)f> OthQðX={j'0 f2U9NɅ<( -|\I\)t4/ȳ/\հɨ=2㕡|rB3<7MU毥r Ioj9奞3I!77œbB:)x˜hB y}8ɸLP n=5b9'oDѝ=k{^w~Y0 :$GTS󲢥[,HUi#cև̪tYy2ScFIKͬzI֙1I?x&uhפ -*ӬITɇ xۖr/ڥؿ,dr&$y9j-ƏϏE 91h|@ oMjm}c]mwKcuy=5K$.|͌$:94h<; YwOwwwwo???&?8*rJJjO+ϫO;ϻo4?F?X?j?|;UFSlsssNEBA"TJX*H[I%Q&]M (L03HIeH$$ DD!vA"TOp>"<$X@gUy-T5>BثUI_hdE%Q{Y%c (MUm#*k@mu U2VB.XI_h;d `c%}(Qk(߲dV2$GtV2X3Q aj%}|dnSC ɶFѬ[\#-}5B؃\I[#hedS]erݺ mJRDx%sj5/5B^IfZ+֡VBخ_I[ChdPBA%} )BPܶ)HQI_CPWTrۏߡߨ`,D@ LM M*&J!TTx"2-Ҧq*[_ \v#]w+OݙгLpF]>;N5?Oݔ @SFWg`dl1G7Qan51Ybtw k >vGFp!B٬i,#m ?]; @h&0:X!=+;ubyo/ѩDF-'ߞ7)!FF]zQx ?EF8Tލt807r`. 
]B8L ˡĿ:Xujjnx1p,<=5t2Bw?ytf3r,:[Va|ho9i5-x\U#m9cұ:v Zm60<Q t>,t7r -`Y#X^hds1B2l©tG ,Dji#yV$H4#EcAL4#$tP/RFM:H+YäRO )#ٕ²[߂ifqeZ)[aFyߕukSEfɲOɻ\kkVW ֗7g0#Gݚ'JHZI 7Wn$?m9e* L?YmQwf"}ӟs$sma~^"ئxQfpPPbSJMFU6 :0͠Iipza~Ly*ŌΫӸ M ~ wݭ*A7WpUl숀aqWNA,X[yPdiR!6DX밠?xaǂId8`Vk ce59թhޢE3OkS[efwP*hiawTɭ٩e9h΃];=5!W b4˄wa#{ F064txB$004/: \IkЕQ[引@BYp%WpNDJ+ɒ@]>!eӕLI^A(.CwI+ %< %t$+b(JC$ MI+rRPR_`0f4E~%qP\*ca`"ؠD$T2Lh!*!SJ$ 4drH2dYE%7 4dyMF%7 ,'W2M$yM4F2Bh.RɈ!0B#J.n"2D3Hr3BJ&$o Tr3BØJf 7 i*b#AYc{c֋E' })W2iQ _ [/_HIKo&$庉%)_$H#2Dr>Z$>I?D܋"y{Z${\H!k IYDry".Sdns$eɵ\${'mnԽE$ܣH>&ҷQC3Er_%){H_ﷲ /QID2ɈD%"F$# HJ$%ID2jF"}5HS$Qm/IE2nf%dNR"zI` G!r~ۤ rVH=smR-H&E%$ItlMm8&9HmRq‰͍$ QHҷx"KAm[GUmfdq&Hp-BPYP E$}:xdnDD,IfFܒmfdN${XȱQH,mD$ PU3z˪*u6)DBoDM'QUQN v!r'Q%颢$-}!s8 : [<mRDB57ҷx"$N uc0xB QH)dlF$QJ!} (ߥn7D55BPdO2ր"ۊ΋la*dEvB%QT!}tU\lJBPdX!s (,H42ր"{ےnGWXKB67͍!L>BPdb!s (1:# kD핅\kDmIYئFBںyDi%c5VҷJ65y[s+kD]5nBQ\X#L.˕5&J:5H]Z+X&z+۬Pw! !EW2Y>B^I_C/]mVmj/m5@%s=ׅJ*kH JuH)DjJR*k}~Gc 'B;R%mRHRؖ7i] dn9PSI *۞!9Q!U2xBҥ$R7UҶI!T%c'*J6)dnbPEVI *[Bm'r5q 97XeY?}wsh;4!MYF6Zf`7}[':9WY`HY,=ou)KYM,``ˮ*벓 ;{zlrp-7`OZ[`o; :"{-[c|Ns>i oC8 '2)SJN 4FFN=}DRVFXNhe(IHyx ,FP ,/=dojqNnv`Jҭc{#xD< C5ջA/E|}yʼn|Ά&{z6r|uXN?}@j՜L:q n:|!ǩxrteK'G#_R'Y'<:'b'<5UOe o?r=N2w<3NpU ̃: ;Qv@f0>$81⭊` l 0:4O~LZ}0L%5ˡ3tG1BJʛ)1 #eQ=d-q32?㇞ ~l p T]vC&ɍm[1,L zI&&wa:2,ߏIG({URF*#5ӝ霎Lzw(의686CYmVH_pr6"?|]:@\dO3}f6Ǝ`"lo $~dG:0nVxhb"݊otq29V_#/wn&ٝb"YdGn4L{M$?2ic@ PCP?`Ef3h9(z4;k?"Pt?"F}^, D-,775r"_Py9]P#:"-ۘ묚g=֛݂X Y| YwrNj )5f@>M/T# Cm}^h@MsT'y#pl CLΦ 'QbF66wx7;S689{Gn.fl$i4f''f=ދlǺ~';BOzL̊~[5 9ս R!}"jUrELN:z |UQ8" ؁lh]ZN/(=P{~(ef[1\(ؘ~l^!'lsuC?-IwfH4ˆ;'p aS^/{9 W .ŠebygaP=N`r¦ŏ\`ce}:#tUTL6c6 hM'4ASG?䤡o'.:8c m8:ިB*a4:k:=xZ'0Saӭz1/ٹr<!L@t;i0AX#0Fq\r$t;N0l~v'6H;euڽ Ɵ hv⋼H +4;d: ɼD2 hgwi?Xx8_g5qrT㞏 L6='hЦg"|8(prѦqR )BЉ8wM 7qB HR4'4 M ivjmE,8i6NmhT8FUT9D 8d,% AB#Dʨ|rZȍ" Z x/pf*̉|.m-sB? -/[}|6= Tp  C3PӁ8ஂɽB u{N@mB\:Iu#tj +Qz_CO]B\+ @Cku@ W^s!υ^>pYP+Vu!/5,|-g+C.e]+|+ \*ĤgB\\ quB\] T 4B\m]  cR{!*Ҽ[Bos#$3h_ {oo'@Q@(H#HD%I&"$D$DVȒ(jKN$$}E&QDhXm7HDIA(;"$փDDDQ `=p /I6)$JIt3WM<]ۤȳd7ę4έIt $:#stM<3{ ٿM  ad"*9} M0%RX U AꥂeT,O%lQ%} 'udn[W0UI e*-CBdlᄲ{0XIۦ-PB2mZ(Y %Q aV2pByBdlr̅0]Iٕ-PZ+i[8|%s '/J67T6d7K8QI 9 am*T2L!,TҶxB%JOTMmP٪*Y%}U2]U*A寒c'T>mʐujYz˲ZmRZ˲mPyM a)*I²u%(mw%c;өDJOd_6 a{@%}'fP& aKC%}'Q'*i`Fc'Q[=*i[Jƶi6T׀"\*2јS\)=@5^BoTI[E-U2ր"ڟ aT%}jU\h*m]5JPDYU2ր": a[%m6uтW*iP_%sj,5JPѱBV׈"Z*+ֈ"Z3 af%cjMVJںyDi%c5uVҷ6J65n[[r+kD5BF\X#hG.-˕5J:5?]:+X&Z+۬Pw!l!EW2YB^I_Ch/mVmj/-5@%s=ׅ*kHJuH&DRJRk}~Gc 'B6R%mRHRؖ7 ] !dn9PSI *۞!1Q!U2xB$6UҶI!T%c'$*ZJ6)dnaP>VI *[IQ" YjWÚwT2]+aRIfG*[D!BnR" B'J6+dR" BJ6+!S" Y BhhD70!ٳix[L{Bk՗#8Cf4 w8Urߞ|u9^ tO2,zu]X.6ovB9,T[kޞ:i+z[|yƷ}ta{nT,}Tq5~{.u6p~V6v.=iA뜒DkƳXؿ[yI~ #bc|^>3󺩹n8%U+Ѭi!v}Հ伻LxX#?] MQVU9G^:0 'ǜzbw~9mhÐ|&JV~|n,Se%(=Oa8v ^]RonWhWi{o9j=B%8ѤcI;<xtiIG ?:&+RV|1,%:*6hNg3C<"~Y&j5JGVѥ 5ޝ:6:-X7Vn7Fto&d_ߤ~@f:r}0`ݒtaM2Z_;`1SNPx[1K ͻr4F%i+cG.f=*+d)JJLXeyQs0_=A.B >kT!A2V%͙dï4w%7$H՜FZ*j@ȵ苋T0cOIĎ:cNY8$Vf5ROXm|S^f @r9 ::7RM¯"yP0M4D*tQŧ˙ RLN:k aI}*Ai(ɍR.M,>6_V\TaT,Dʌ$kJnF֫,!ދ;[ahKrJZذI#tAviA,~~DR#"rTh>%*AO밨k c8l|P@6m{s FgZE6=^6`Lo xRu=UJoa X7?bb <—bӀXsz x!Tz!3B9 멧$A߅7#Ao|n4TVHG&#U_]q&Sh1Dm2ߪ 6%ǻh惶#O?Fd!'xۈ`UjG2r.=FĶE(#a U Y~4lŊSCk'Z']_v 31;j3<F؉jѫZjKρCuc7e#lj5;5ZH͹Y{ 7-^a#P?eeqsl$K\N.N>=Ж%UQzRKl62H=ۑn#|.f"p#b®ŶH_g/կ5ݲn {7;UF*yKaIcz,&EFź-&?Q#x (B$)Cp^xg,/-@agCiW:T"݅yk6g5FŷrҊ$y9Ԝg.:+Pbw40#ƼN*iiNCL+ Cݎqw,gH!KÔE:"#>=J|Y2*,c~/=F :ܑ2p.=V PuS.#ρj 4C2ȁO mˢ2! ~j26s NKhҕZ8ۮ%;;!twFHuŻKNp ? 
¯HzB#czPhVi$}„JN4B3E-6I2`[NIAO -^\CRC^LOs @Ez>FCIW#.=:][k\zkPk0Dk( ŵBd79GiJ=Id#K$]BnJɛ& 8P%m!"?FuJCmTV6EJ#6Q qW#JEJs#݊b=Jj?9yJn2٤7P 3\褲c7UtgOԱ%?^yd `#A/5X49vD =, /RX5c ?c~m:&X[\@<9/XS FX`G8DAEXVx9JD_BNXxrcX{.`@.^[_{sۉ4\#9rXGFűu|1)r$bt1K$=6;ޗqӹ>OU)ޟpEe'!XTL'ڵ2 t1%\/uQ0ǥc ڔŌbs\Wqիǚ2`HAhs\1,09~ GK `鎑IYEck<_,'%'5˔W2.v·/4p%;I> [-aN>V\<h}I%(WZp 0\~,&Ҝw- zcH$7ڏǦ^n{&7]c O] VHpD 6 )cfH,z Z%Jjư MH N ח˳q((Ԑy(`]<#N eY*[8V^v /OXΗ x-<0ˮp :(Xw&lLEc֕H|Gy֛FIW)}])溲+O+!UJk%`櫀ʟ%`^x% 뾮t_e W^N&`J0}]̷0}]YL֫pcוaN$tmו.` 2/W_r0w/j XF(+ګUںvRX7v_BS@_Wxy ƮbQ֓ WU^{նU*dƮ2[mU+`,|}c^v$lY@_w? eW 5ucW6zRY9*ucW廒'`n*0ˮ} Ʈ(- X6vv0c8֐NumWEdЗ=; ! AR@_vC)`.;{t$`Kc٣m&;k h˕ƲGOЗ+Degl[*/;{4?0=Z˪ѫ\euƲGgY6֫zreg河k/lXlb]⫗1 hM%,_-Q4{I ek|-`,[{&`mmڣQ\|6ctuꨏ^wu#'`r}ۣ빀^wN'`ku}ۣA;{ huW'xc]O-eor"l//{{40^_@_p-ڏ_7(GC򸭗]ʈƺ0I_n쒃$bn쒝0ץ]Է0֍]*.9Nc%I@OmR0׍] )׍]R溱K0Kz%+`W_BԚֵ]ƺKz+`4z (+7Āug0U5v1ugH^}I-PY@_Wxi: kX'ehG ՗^} YPZXW1CT[@[wvIs  Bq[vGsR 'H:0.tj .evz%N@xc%#O0Czƺğ[ 3ug^wP_@_wv0%\?cceiP@[#:#0֭] e4$Cs]܏zvn2H@ƺ%#4 huGc#|?{HnAB6%d"f' X~rj9ĩ{fS:81}x'~?' !p88yS9r8|WI/=xٝ:3 tn_ٮ22I2@ƙfpޣZ"9i6ɨ6-Nd缽?"gN,qu ~K`5:* ?,'iN DŪˢ˦?VU?WYu.;ĒKw8d]jg"\WE>8N{e%v;GdgMﴒ/Ԉ?rQvͭ jA rzny8%GV]^} ~B{!H(3^8l}T .u%0Em2c%@ #%cyp`FMXJ;>&}uAG[ ;_A?_5^[R$F;*0=3],ުOGdVSGFj:xL|f9iÓG%.>(8|Q (րO1yH9!9Yȓ#>".>2eC%X չ#lIͳ24A#Rίq<+*4=#}Ӽ^zyƻH{گtc{/~=y䦂N<ɉ}pux4<$zl[RKyYEО G,W$͋3}^>^Lg'm[yw"Nk Nݙx?W;yKA/(k ⥄W68bÂD}/,AY f9+ xW'AߝK,Nn,Å $OsV@N䀾+(%Ujl,H5JPZWL'~愮/(9B*&' uNQ9ծ "% }fc5bYI˃m♼,NC/^-7&ƯQ[!(uTбԪkTc}e@0͂iXYU__XQX:*.`L?}uĩ5jp.n_qX fI=7}ua]}-,H/=҂mc"Em 57-L[3['@82LFz<̄8=?BĶ !r"2<|΃{CA/3as y#i WNR@G$ge?GhGƮ.?6{w7P}ytdBj6C65?լz!OLPTEr!L]u~: A;`RVB?BnCi iCE?BʶF?CG1ڳHD\C)H|jӏ֡g,!W@j!3_c8ͦp#Qв {./C0eMB"''X ߸Npq]Z nTtUї}u겅ܬo:#B&ˆnN0G)3[,IO ]֎$ss$iT91 Y˒гc(3tR[L-&$gtrV`!Q$v/nEb+lF\|z! [<[`JC)i;%2dY5RYjZ|:(bd+Ѭ&.~_eW[ &ԹY7K.ht|R8K/A!I^ȅdO Kѕ@A䨞-@m=8H /HP@$ljHH^rDڒ/sD$,r-A 9 RH}r=$IE~%)H"2C$2%KIYDrI$)˦F]~I-˸H.$e;iܨۊHn="=-Losn"]JR]oeiww $QDFB(P LmnD}p*2$PQHG!m:vRfF`NSH≨2x"FITZ*o[G [<$*ҷeB638$ xWȱQ,,mD P2z˪򙀵6)NX [H(2Ie$*2x"JIT.$*w23]̓DiQ/dnGUP&Et'$QC!}'"% iPEc'ضQ;;*i[JƶiӤvT׀"Z*1чS\h)-?5֡B^TI[E)U2ր"ڝ aKT%}ZU\h*m\5vJPD[YuU2ր": a[%m6[tW*iP_%sj,̈́5JP?V׈"(+ֈ"1 af%cLJںyDi%c5UVҷJ65][;p+kD5B5\X#>.ʕ5FJ:5-]+X&+۬Pcw!l!E4W2YfBذ^I_Ch|/mVɾmj/ 5a@%s=ׅ*kHbJuHD‰JRk}~Gc 'B'R$mRHRؖ7 [ !dn8PSI i~*۞aN *[#IQ_I4I2"Q{J͊taI2"\K͊4I2" hHIM0I a$LsH~6FҬ߇ {/??/w'6LwǿwP?BG ߖng????}dsq'Qyi=GS qOc7k<0gz6*-Jσ!ɿ:=|볂䳾H1$|%]C͞ LqGx`֟k)IyUY$ E wM؞7a㴇74cs;swRּ,/o_ Kt~O9 9o}R|VYIY,=]ɇqy}#v}{^ o7"9%H,$꼇ʳ䳾s/h$Y֒}/ м,EǓշ&^Ü*}0/ks9;j9"b0;lZ1 N%V,,ejgY=ʳDʳ_\i~FiBuW 3] &o߲msk;XH /糒g]Tgg5Q_l׿V]lѾY[&c[ ܐ~-TwkrY@8 P|'gc- RZKY} YilF{gk=~bcZ{?-;r\FOLT.,$=YAY-JRun-R$N˓{vσ3Ż;V9Hz~ 4Y~l'Ib$<1O}VKjyJ{>+&{zpyRf}V<=ˎN ߅:Lݬzws_w3ܖAnд[p~8^8~|ķu.FGVz"O|<|Ϲ9^^7w;e53. 
tN|6߾W\r[]p[}_ |OW6nu}91?ub4zoQtX| /n.hLC/ċW#>nkh.NoRƨ&=ݪާÖnGu~ 'F]#jQ& +l(ƥ0ShnГH IE'U{7A+yf$>ƻDS~wGӼ1rv4&O[OX,un \+c~]n10&1?`ukj)a5._asR̎Q$p͋U:ޫU&è+fD|ĨSs$uQAϙ~X5KWqe/L4׋Qƫ6 j;L|q!бsYȾ oe+?®6w,sůC21kۮ}4}e~Kuk]3KKZ_ra̱4?Yq⼲Fo3@Nw1SѝOhp{\y0˿ǗVt Y~jt_$_%&L ԗB{y`(WJxaøM+ ͦWŞk}ýL!1qOd>|}"'J5Āk%iuט]Ŗn׋bܘ]WB2^pcnRHByĈgMPqx^C>x$םmΫ>#Ƀ1W|a2Aۑ֙͌ .?$ݺυ1>5s p]'OfCݣ'.k@<7X]ωTɿzVy/.lTϒ 彛@ky=e)}(W(o`oi^ 6.s?pe0}ۭ3A9,Ɖ!0fpgc'6<hs*A1:K_ɲ+GW~:s.!,rPpo~ '&x?-^ WdzVJ,q*cҡ'~ˉo}]vtaѹyw9]gE39O~>.Ud{`ͻq_,°͡=11 Ű;[f?އǤY)U"VK~I>/>Ǽo<^B01~v>fYYdخ?Izs.[/|ynk|=D–caچnϺpGǰ}=e)|O2Gnvf~Ln<;cjQ oβj߰-*'ՓϹ0~{5ns|a7( ;ul+ܭcc2ߠU.u 99fW奄ݤ'/'f e^)G0sj>O8 cV6w11Ko=k5FM.^cM|qLp}؁ɑՔysfHӢ\xS'|Kɜ3"4C[m/Bwi*6g v5sp "hk^Ő놏^hݦW;_JV /kw ~b^jmoW/J ;S|@y< yHPQ*]t+sg~n$(.NR7~JL:~ܪdrz+c~Y-\x'ؒQ&Q9p=}_aE2IC9˚ٓD&,`54A?h:O|O~ve LrҺݡIԲlO;?mbŔ4=)Jz=fuϸ0Q*1Kcd3ybi,~q#^rPԒxqo/SfRYP"ƞ{ >dHٵ<9Y>=tQ^ rO/{!ۏxهS]}8|.4d|:~kٯn19thqњF|:w?ЧЎ.kpsR[MKd˄sH^u&6ܚfeSvwM[q1O.gڴ9|=GsRyz?>G4, JU_S6uox&4ZS}Kk#@axZbFQtM9$g}w7>|~osIKoc>w-c7s`+"}u3ru7昫W~U_T;Tϧ?kcXg鈛bpw ʘudWTW1^qa1/cK~okO̘ O;N6S^=Rqz6:uTzދ)GG]SmN;"c^DŽc8TtIc#tc;pMsQּnY9.5GS4 Ukޏ.V|t}<ʝ{u~7k6=ruçqGV:y珵Lkm(I\$m:rv{umLmYgJ^e!'֭i)sީA6#{/ VN2[}瀾(q|{>snygj߹еn:v:#WCeG9uK+s11>esC)ܯչO(o~Wpȫ%f窇Gs>ys^D1~^E#Agϱ.Kť#}ϲs3J>ws/K٠~.=E>;^eھhg{iSyGePoywܡҢN8 D1K|;SGG7奴stV 6υ=/ҺT~k1/޵,~ͭy/QզŎ1~>=to_y]SG,9rctu{OΧ3fՏ~ =Zu]"v\v̪O_}KU+"§}p-)cT;|Y㋽poq׍C[dMqR9 Zk@t1~>4zΡ?ƣ"3Y*S~>TՌ5Nײņ>(#bl>Ng;0~:~$zY)cZW! fjN0[qmVᾋfϩA`:rw~GpvuDԑOBD?s2o>[͎2_G~nHO1wuPwrfϧfzJ=ⳙ?1K{wQ#"(CXzYs3m֖W@߹=c|36dfjJ{xnw*qБΠg:❕=u"cVl֨X]QG2V_gB.x^=:gmϦd~p?YeluSW|Ux,֪Q;0j֫׌Cэ~p*\*r2_\s7jKyckИ=X,puWE{!zw\uuO {2 ]kgϣA]GV={Nܪ:[[P,d#G/\ON.jz<dѕ 2fO{OgϦNyVb;zrW߫+]ԯsW.~8'y?S ƤzяYh؏Ysr*Ҭy<լQǢsv槗P<>1qȷ)g {QZm)'ٛcVt٧Qٔ gϣCGn-&3w8ulzۭ{.=k|8v׹c8櫯|Їqeo,y,QZt.' c{Ů|cʜe?Jq'";1?،κ )BU. q&⑟uSBt܀UHEWy\폮ʍFWwP*^YtU46Bv4UHB' 2 fͫQ扦0ë5z*īSGīx^:TBʀiW}W*RS£ں]azcEp☧uz[9Q'M2 R4`W2zlWy*MlP΋2N/U.'bx)),o UrWVADigSh%WQJ @q'&UVن*Ze-L `Vqr< 'BZ%kW۔} *Lx 'nmHWq(g]hU*Sab/DW!]j*WqmUnqݕ*~WWO(*DW ԃjj]%(U9@Wqd;i ^E!*6W"\տxI; QttW^Rq w}"Z#^(ʫOR8:q1W~U'UE`l#*{Uϫ5O_:FeleVvfVlfes)ZYq*̊fţ 3+.Y,@҄Y02܂fVf0+bV񇘕Nbocf 700fV fũaVPg @{$!VhUd@+N[ 2\4)h'VLYZq ^C+cʡ5iA+]fzIBK>R^Z VQ#h<~t@+^`0rB!y< ܪFZVPV?lhOZh%\VA+Y(+N< kh⁴퀴t% 6VDGZ9 eie'VNGZAZ}qRgKii~nlIZyR/ja(b|rCZlj RLpL3¤j'VyAtVBD=MT9Q+Ǧ1*  > B) RgߣX+X+RTDZYfGVꔋQQ>q4RS+NږP+{ㅷ*JSbV)AZy1O?K+ƒV`k)!F@+Xx(00ʬ ejej62A2rmFIL\[Lθl}l2x[xlVz7[[aBal $Jί}7cll#05 lqWZ\Z)[mmFjk+8?  Ch+#Vp[qcx@&`+ock؋%-Ԋ#V0~j%j26LVe*/YñVxbPԌB]>顈>V^3X+akzX+Ȉm< akMq#զVztX+:VkE"ZDzX+YX+旱VHVcl=[+ˑ˭lkLgZa6@֊V8* !ڊ##V~ܻVEuVfp9 W \E8yJi5ggi%d; W?ŕe7 +5Q ,%"D:jq%9W W`WXoA\y^WO ĕ'" ӋB8%J+ίh3+N cfWva֞pW]9-we]Y&+x+* ҫ+9  aT6+^)Pvxԕ Y1"z u%v&PWfWxĹ]aBZh YЕKЕͷ02/Mylti AWP@WPAW]^]aItvJ+@W6+AWWlu+0c|e{y $|Y+"¯Y+Tb¯Di%!U,V/[܆ ⯜n6+rWs+-+g_2{0(TDJHW0Uï18~-CwW> } 6W܆.,6B2 5XKXn,FXbSWx|x|%egWDN'}e 0$_}W8Wb_9'aD H[kLWU⯜+ަ+m,_^eÈ` bW`*W⯘_Z_ͯ<+_|#|.𕳷ZFW}gn |E9Wja܌2ڃ0_0_ ұ+u !ݦl3\ /HOeTp V8]k ps(+W|;PI?#'} +0W<^9-XRى, '^9? 
,GS#fy2 W>"8)$NH^},x ܕ ]e]>Z쮜1TFxT~J" C^RUr8+JxŹ8W0!mxwDĬ^W+e^Yz*+K|x%gsPxJD^q}j.+0WllX%WW:+sNț)weK WԎ{'Z"y%}yJ_蕘#WW^a c+Hl;|-W-gug0+u)v BE5 Q+0?kW7o|$\b|5r(e '~a'݅_M/_q]8 Wp`_ќve2c]Wl"ʳtuJHe3_a.~erD_aB~>~ CWfXOP&'EI02,+*lEi_I2퐿[%Y_ 4  b,tbXZ#6,mT?uKW%lFӁXb.` B,?R,N6 &9,ޥ2 u%ߗzl 3b= 6 c/ R,dTck`a(22Xh~!E (6X?K~ z, `1XE?E˒ū,§r64;?KM,I1X(T`a,6XCPwQ|xm,}R X,`veA( yc,.F`!8|rDt; K* X`yOe #l[[D`[9H?2\XF`y>%I`踏(H %,CAx\ ,Ⱥ#ξ`9X,号81EXc>a,\2,31k!K<&Xv.[,`9RLlX&X`9\I,0 ,DO,Ov`1 b' s-!X %dB,sڂw#) y,.`bXő,A`Q@Wؚ:MW\o_q_|]ŘW^>+lh%fX),X_q47rhu_9Pri^J S?KLJX L ,˓GD`} X't#X؎W _IB["W`W(̍\u=֞zh,z X.o 2mXn$hy(j,X~B% xY'E ,^l,; `φ`peO &XEkE<)`&X!/Ga `}ãiXaڂa| ҡOea-G(OL߭ԕ~ix?),hb(,N >0,^6R3_ b ]bX$ r\fX&"Eh&0,@0,!t̰800, d. uG, 7,֗)򩟝pԏ}gey^`X0X ,*+SQ, ,4 -GH#0A`q[|c%ojc,g^#5X ]J,Ç`YeI,B? e,A`ar+u5 ["32T+v& ,^4RF8k- XzF?\ w ԛ`#`Xn_IX"f`zL |%3 ,u_ hei <dz|,˹㳿V4 '@_ 4E W,O<"+0HSb.GJ6D'RirĻ_ɦW:/[D,E+gW΋-(Lt{8g^Jh(+ip+ި4 & Cl͠aCk:WEp6 X]! "+ץ\g. i+ xAWo#R#+65Btڱ[P9 J= u}]A,]9+aPg󅍮O ]Y/3+eҠ+u  X.C;+Р+AW@HAWǰՕ9%<=+ޅvQWZԕy@Wt%L+]AWx+Jw+T8cx* !2+谘+X6W\&asEi-!W#Cx돐+ა+/GD ?G W. P+!WX \ rhJx+ LE$> K'+!W C +bp5AByW!SmAfp2Q% ,' 2 +,~8 \A~WWX]i[Yxy+'7qg[ɵennE)V@VPV\7mPbMVze_rH[VNh+[9tn­81 O'0=co4+\;\\y\ ʲ peQWepGW⇀+hpp%0Wpp/+믰qOoq%E\ټg9J+Ī WEi+u!?tg??dor3 +% <IB2ȭW,C\a B%H\aш+ޜqŝ*?,\A+B + #PGGW؂qś,7+)Wĕ(WU.] ,5+xvnqKWq%S\^[ cn VTxRdBi; h+?l? > ٱWN\l+Ux+8o*v\p/+uq 1ZF+\qXW\ۈ=W\lu+,Dsgqn\ p%fP+ ,jVȻ\aWru$+㲵R~DJ!Wer򮂐+fd2r)|r1R+݅)#D\aq#A,qBU y+]D\M\9 T+7G\rҪ\Ӣ6f1W<[ tu:QWn,uevO+d\Yvg\y+ERs q%6W`dTWW;CvG}efP+ \q^ J8++nW(q}SB\al BZBx.re`\aOW)1W6áz*WvEݕ(vWL]bwZwe4]pW6hvW5]aw® q)bW_u9+U&rbx_+RWBӇ]vٕ֩կ2*+ܻͮOsqW@qWNTrWwr]u)^!ryuv+^ ^~z+m2BPo r‡^ζW_ℏ+@[ `rᯀt0_ -MXWEC'+W>n~%xp_}_ul~IW/ïʆ_A1b}B+՛2r_991+X%_yJ~E@%WW-/HI[j VRꝙ:l_0p+FͯJ+6yYqʹ늃_9,<_JZ¼CYJ̯g¯, 7J+`WR3a}QW~ ۚ6W|eE_1Qa]TätRP F +D炯_|0GY+T+/ 靕TT}rhJa}}iB5b%0W+~‰_l=D'_Q:¯ Ř_9r0+].3EiF=TPaKWP W ܏<+֣M2A^q \+!&rE^ь O,+u,)r%WLh> aefr+t+N[TDlW+Pxs+A^Yf+8lyF+A`^ 1  $q9N.C\[ {蕅UQ:4h^} ʬ%a]F|WyŕّWBW,0C+ðġD^%jyVJo}X^^1B !w\W`Wy !Wlt^q x x} ,J+~+ +\pW6FpW~55kݕZɳ+1`rWs+b]_dA+JNoF̮8ʳ@OlNZ\J/kɿ`+Z8`+ c+3mo?A[z,_am+VmV(Cr’@߿xU'ҁE[E[ bΗBlߎBmh+^0 #'E < ɠV~_j\Eah+Jl]V<㴶GI̒BFPVl>[5<2,q+K5*}0 .[y`bͭ\RPB0ڊWWmp+[w񊸕[? RʶV[ⅈh+Ȣw6#VV$BQʜK[q_:VTX[HomzB ڊW(@[mڊ_; IVq4RmV:ѪOgep+{_q+6Ngl_`n;g[q'܊fVTտN0BVmOi08h+'Lf;lOs+d)E[ٽ)Ph+J~:_m!oz?DhrkSp+Nz[lo:}i[oŻ[ϔRinZxp) [íMV[h;܊̭tGHD~$lFP:[9/6$VfAyPS0%B!Sk+xVڄogm2؊/Zq5# eN%k%VAf_.@~03,*OCmУ `+qԶfS6E*hc+wleLP U ؐ1DZZYYW."PrXZarEck212St}? yDJRJ8jeCD,GLBh}J=DiZ9Bh$;K+Q" 5H+H8) 'ҊEZx-VnaסJZyҊ R"܆%t t*_nVKV[h+ VX[gC[qflG)PnQGC\x+f x} 0&\6+7 d+V0W bx+=͕Zjs%;\\!dœ'抋W\I 7tř8AWP@W?Cʊ4XaVطh2rfZ!h༠e'Њk9+Xpr{k pVe 5˕rqVg%AǼ+*Ί8+3YhYY*[bb3?)V;ΊBLaVfkaVoZZE[+&b2֊cV\iBhjeM(5Ҋsb ieH+s0oliT*%.PBᨰ?,f %VIdˆm|a+˾WbbpC,\;[\>ӈ+S#0@\1aq1Y$x#B}+KqJ+T\YRC>7&W< yPGBI\c+bf0WA:`p\Ɖ1WbbT\ZJO5~r\qv1W|*wv\` m<$cs{sN\s-Vbx EP+vsuy6W؛s?+5Ƽ.J?o+`0Wlc*抃)1W f\ara\csw+K4Mȕ4ɕGw\77ɕޡqWeBv\a+H1W\È]bl?-sI- uy++EeTp?S1W+\z+Fcx˜+ WȕLE,+Q\ re~5̵\s_ms 抷֋roz U+ ̕(`s%0W@ ++6W5+D+c6ԁ\ن2Wn~sś\?u+L00(]BHЕP l.]YR+Ճx۾]y%\=rѢ6W`ܻ  B`\l8DosaO++͕k!^EYWxЗ!x+F\_Wv?WX\B\VbqūW'X\ɥ%qxq   ʈ+LN$54\9Wэiĕ%-H\aqe"qX+w5 W{-hA%ʮT?VytJWiq%)W(rmrE WPvf7D\9@E 0\9_4lb/q=ʵ[1Bi+I$\q%?8ۗ[JMXř\a-pGRˠ_ipe3peBpp%*aOV?c:"p+ Wq oLÞV\[._O 0([A[a#o%B[IP&V1݁M#87sRB8r J7;KWNDWX\Y ++H2 %,. vB ,rFd F+c+̅1W\y5WAp@ +rœc+O~ȕ&WA\9-\ q%j仈+hW\q&;i&r++hWV|R%F\ -cq%jFWqQ+ a;ĕg\ }OS"x} qU$dUfWkr(Yȕ}4 0 5+SJbrśT\a൚+׿ӻ+eހxetv? hte) rl`*3hdd+,;<Ӟo*U_iӑE]+AV1 W]1&C3!̮]鸆^"wȸ+aW 8 hu%[E]zD]qhtu(AW樢]QQ̕*C1W0YhueŮGnc{NלjvkϞ۲O,d=+/Ȗ{J(< d]flfm*|hͶ5Yٖ{Vۋie|ͶawUol=G0b(mkl,eFOsYnfm爪 O?VcB}C-s%j[9ּ.ksDnK}=v[ KiidrϪo+-giNq-,~ufwڜC*.&5E-tJܖ{m]>h-oZmpj-j=g 6Z*

/lܚjkE4j鶰㫏g#vI2NX 6j7;TXoPa8x(Y4ی&ipR#h6[6߰֨0U͜P8`ߚ9͸)(XB}~:CU%YAU^Bΐrهh&^0-=G 5cF +KE JrUN[|t6PfRZ"},t,^LPفXX»@ ?s@h hlc*FT%IAHm5䏶rI"i#?eZ@ji0՘vӕMVTujgW6Զ\P=j;%qi2Wk%!*XmKRj;,mPZ݄WZSv2UVmD]BUmPle*Ba>0+1q  ?9:@EIm ]w(R:j'rJzl@m FuNU%Ґ[ڈ)~-Đۍ3uIy?m$,V*G.z˗qE%0d}IRӲm8Po 'x6<тJ-vv&K!b3f5`VMCQ栥&&ïũsJvl¦S1ǯ#mw8 b-MQA[T0~s\r;d9hQ+8aԄ QUf9I;"P4msU;ȱu`j[ z._b-UA錄sjc|ADUsXMVPF' ʜ:\/4=e ɚI7 a6@ҠUEcu|`N{Y;lGƬ`Ce`납3 iVzb .I#3pVW֜U& flyLU`nA3Y ϻlf1B)FO#ZIEK)9hLh1 1iEQF(`ӘeT[c3*1i#QٍO#ccccs|sIUϸ{9Q,cwEV'G[#5uxpxײA{ ؇3V 焍Lz`v-[?fC7ܲuhf#c|BH[JK"QSYkj>i^[8mc'.K̐e֜c9r8n>7i3{7 L+y#4}44o) RFnMr#@45 4uʠ r`b`'辅ac54^089C &F2B0@I ='tFR ++1172;+V//hF4MÓ#Zn!蛇HÎ$:aY]5 @X QB*g ‰%Rd`P/q1L ue*?oEMPdbS?@BCBGJ׿"&'_ A"yHʕɫ-w5$d!!@K`LɠNBCG=?I0 <'3DBX;h} kPSI. l&[l$$ZƓhAFn+I$$$} hL+m1]6'QPBk$Q>Rs$k aS$ʡ aDX! aN$B?Iҷ.r$ʋ aDu!L aD|!-$<3C?JR܂"@ItH* q*dnA$:_!q,NJuI*[Ii' ϕ-(t/gJzhQϙjP J۾JFMRfKm{TׇW%a6U2-CUBX٪o d\ۖB[!U2tBBXmaa%cK'TL*e%} U;+[:i!VҷtBJN[-PUV+i[l]  a*W2tBBX௤oFA%P-P_>*i֡J%c b aT2|BBbo[ZU-P˫*[dV"C]$U2~F-Pװ>V2fG#`nANk!Vҷ}CMJjr%c'Ԉ.J%Adlg:uΓ^I u+ơn!TTҷ|BʂJR(BC%}'(zJ74(`| 7 噒T2MCJBFPT2Fc M(BS&!)J7P!UҾq*J7S!TDUҿqªJ7}V!pUҿ Eh*߄"4eI;d|ЯB[%mRM(BnW%y-6$dn!`!TVҿ E+߄B{3OVr}3aBf%c )>HZIn.d|sPB%k%} )b+[hHY[ŷoFJ7)p! W2E 2(B\U@G]'$ĮdnQ!Aw!}Wҿ)E+[TH^oJB(mQ!m}%c:/:J7qJ\cpM)bmHqJ7J1Q1R&H*h[Ph-og)3/o>3- N!өo}*۞B8XT (%S%m BU2|B#Up쪒*[>)B8)VI MU2|Bkp+t-64WbCC}p𯒶m1?XCLQB1XLBŔf!sK(b3&B [I4}ZHb-dlðI40[H߶-dnf !B1,\b#h0umoH~~'.wuHk`N,kd{s O'w',`,%ޜ՜gf܂%`ΝS|FW2N}VpEsVsJ|axr[\+hwu1 oQjS v&'esRN ŝU6S){2Sm'oU9"!p)[NFq/,N? qBGcW 儎=HȱS1'u?b _=t|潙2.rӢi,RL=`IbzdV'Xn:RTx, Li=NmpXeaճHc̳T1Xe9c]<ɌZ5YDv, +Ujֳp` L~Ue)gXEKٮbyH`iĊ>rQE1W EPӢQ ja4Z^#0.2n\AmnG: Ӎ!`oX:"^3b !˒dlT.p\Caj,rra%u FuKz0Uz@Qw؟bAwNTxYx uaM/$Ԗ9e0 \.X^Dua, 3н,r{e}D%yO *`NE&6[ a T~9h: SUbecbyD&Ȱ ɯF0M'no 4BeME3x6nPZ\?$Zqò.qaAi%C- t脠f0A,Nlx p`SlߚE^k EcIȄx Bo '#=E:\W\=.rۓ *0Il6A+t?NBtn:,h04|ؚ] ڷ10!5|E&<]*^d<\ E/?y~c0]7v=Ƈ#Ey"Z)AY2S+C2l [Ip)YAٌ<ؔ mk7 ėB x-Q:8bENn 9=IX| q'T~)K/Ą W~/yۀP0D̃:!T&Tfk#7 /mPr [䒻I4x҃:=qu /}\ v1Mi/K ҅UJJc.R:5QԐ޴qŤ܁rYPykC[Wg>I\ȉ5[*OnX@ { jEaBEEdbǘ<&P[!]]-"rʁbړ+RU=|PW e#=c煀;:%gV MWt!ԓ{JLaHx"cj9hE}'xP_N~9BAJr'v3]{< fY4P?l O{rϿ| `W3XElh0G<;i61uA ;'' X ոsqR~hH&S6x17ipO'psi?VqHeF>/S.g]`yaNS>8WEX`&Zp\ ^C>cvds0*E ` gAoe)P4E>Ǜ@y0ǤCgRyч8rr,K]ؖh^~//K#bNZqZ|8h6E|Z-kdgs@xJC9tHrplP:. L^z!:cǡADbS`Q{~Ŭqj1tZ!ɬj/^:9P8)8*!Ϭk T8,5i~w : N`1`**0Y[ԮX/dLhp+x3`;ك4dwCShg J8^ S8 3>Bvΐ=›ch:;LFޕ!hi]{qi<{v;NFӳclMj=^F'qhj>Gl=D 9@Pȃ! @>D= 0H H( ʇtiR W>dR eTˇH!~$CfĆAC-4Dgg~|kFN2",LI@5A~8 5NOrz?rDޒ/1"qDRbM$Q$cvV7')χH>C"gQ$WLs/kIY?Dr!)HU"?y".?"suZ$r-Ab7HҶ]%v$ڝo;aI]7I>ou'@H(HL#Hd#I&2$ʎDDYVĒ(YKN$s$} (L4HIe$R$JDjDwA"OT?N x^H7D -Iy $~hs Y a+jV2Pk+[>6t!lUW(–w%c;өsDJږOK_65 T2>j*[>!-D!KTҾAEOPQ-ϔDmB9J%P4BS4oB:J7=O!T2 EH P)U2 E "PVU2 E( M(B V&*K"Y%P*im#roBB(̫m!_%s  M(BX&6@c%QQ*6+[hHDJwi%kZ*Z+[hH[BCB(­3V2Eh n(B\5ʕoFZJ74(. >%Mv%s i M)BG^ܢ"IY/SԾ'>EE z$Ҿ)E21H1B7iBuPD(S(~3O:"I8KR@ۂ"FR -I4RH9ESIBOO!s3b( [>JAbEE2|"hELr2|"&’hj1}VbKIBO\MҶ؈B1DmӈBƖOdbM/ҷ؈)BO4eM\ҷ'.&7 [BI4%Zr͘6MBږPdk!cM)Bu4n9+I9AdnAbZ9ֺ7dt%9adl[Gۚ9ʝ}ַ9Q$Γm$sIP}kIƖPI@\Aw.IPBEEZ1 $}K(!s0H0~8 EZX$i[VIƖQF\9}Gq.N!Ii8dnE s$c($%H$i[T!Kei$_-*D&24!)5AF$o9N0!qO7 zп=㿏qzj8q>! 
ONjrԭIj^O\s'g5TrX;z`1d8qbƒ̻3$a߮oV}m9|QY'>cNGx˃.!4aqJ\++qOZY{ƗNՁ q}]?ruv d;N-0IvqNFsr]er\eVĬ$wA'.;8p7ureurW;='ν[ 8'-bl8E 'x8Ep&?N0 hD9-'9l:rn=E>L$Bb2BW'f ~9ȇyGԏg>̹_CM'xVw`~N18,Z7,hp&lմSM{encKqTG'4gWGnP'EN.:76}tQ>͜M5aaP=amXfCj?c*Q0U ދ}c>%2OTOdMĶq]1;6bF]6WTˇC﮿ic/F]KOCu2lb/̣uaǫY 3˟f,djYF?=9σީh2Fy||,~wways<-mf?K.a5CM偕.m|%vs#LS铤a g/D/UV+a(u-@k9ǸB{,Oﯢ->%cȞJ:uCls>0-V4@yʯڞb|O鳭htiGKmi?71ݥ)?NyMIwn2nG8NHn!Hog&H(Jaj{;NQo?1 ,l00e,*R~sG^ fK^CJ'aW0Y)'d\eOsYNh-T: ]{`᜘%b0mM9Z5/9P[VM9U|mN9(QbK%1*)<'t|%Ny-NAs7PZ`p0K.3* 3蹃9FgsNݓUpLLJ%jxY g:άkɨ= аh44O ͜qoݲjtM4ޚz;d9l=Ƙr (M:i_om-1/ ;a}5 \l18Ì s,t65/+#g  l6z$7$S&3wBlGyZ5Xͮ;%=gWsN r>ÚG3U~v9/iM}-t7j -# ^d 4Ivy'uௐ]zy0|ȅPo_^Cj Q |#RY oTxTfA>kŇL̨RUJE.d }(A`7 CENO$sK>B>(ґ@r#Hϕ?F^F|ȍ" |b>d26 \v ?o$^$~CR!\Dr=#)kH$v&[l$$Z˓h{BF-I$$$}hL]5HIy;7(O(D!7(')yK60)9RQ0J|l0 _!} ! aYs$W Byoƅ0}N0UOlfIx((dž3G8$ETQAU܂"lIt+GB-(YIt-gBxM3o!<'ѹE^OIt/dnAU$,C!ǶoQ@f*0DTSR@۞N!$QQk*dn[FԬUH҉rm[FْgK'\WȱmQ+maBƖND2HT* [XDųQ9Mj!}K'J[҉&QEQYNs!mb2t"IT0/mzgir+>]}/5`6X@HmÃ@NC2݀z{_Ō`<ږNDὐQO!} 2 ВXH 6*Ac!m:CY"JЯYH }2|JcZH߶2|=J[H"-2ئ+}#ǖOk>.df-h$]܂JЏ]H u2`2|JЯ^Ht}23煰Ǿl/dn[`,`!}'8^*(BOp$M,ً5FXHL,dlGN*XBPhe!s44&S FiׄB#9 kBўJ0&#F ik\h^i!cM(4T f׸ЌBPhVs- 2ׄBepm!cM(4V TmCss kBJ0G2`%*\H_ M'.d +BQhr!ךQh.n.dlB8'n7]XsMͭVօ-48"Gm+8BQhw!s(4\ F2֌Bȕ`\y!m(4f+,-*8] &הB [Tp2L/)+B_ضW5F`!s=IkP  kJ!]BuPQ5 kJ!-B]ҌTⲒ-(NYؖ7\*f!}'Y *s*zg!}'ZP%⨐\ B!*dlDHUHۂ"]-X [>bBO-o-] i[lTFHHXH62|"4Ic,orBO2 g!sK(B DjBƖk4-k!c&m!-dnfh{F [l94˅mk{9х-u!c:|5wmM(Z9Iy B!O"|!}K(Br_ȵEE(H_p Hm  q}t?(o E(27$rl(o E8?2 % ?GkB.- 7BƖQFIdQvc8YH!}0)dnE$I!c((%H$i[T'Ke$_-*G&2!Y#c,`5-ŕ`.Kz%X+P׹mlU`;+[^%+֙DkCmpv {#pQJi(Zy⑟?Ĭp @DJBBtH#AJH15#Jеi&ɽ_ Z#eܫJQ)ddPLMdWV%$~+ 2ˎɯbZ} ,i E7}Z-,PZ/V"ĥpmkSjuGJjJl8[F0J,kX6BmnMIb*Ib#BU r }pO&0Hd&,I¼FI?+B|.H|-0"u,e!LA(M-o ]I¬92B} =B'a XF8$ЙE}|TPAU܂"kIt+B-(XIt -GBᑷ) <:W2+Q+[PlP+ Pɱ,bTl ƚMR ..Sݦ$'GYFTI ՚*ۖU!,kUҷtBJmPV*[:^!UҶPB&Q*-PW+[:"m%sK'T-JƖN\kϕ-6T®dl*ᅰZ^I +[:}!,WҷPp6~D%}'(JڶuR"CBجm>-P*֡>U%s'*=JU2P.x}#ǖOgXI +khYz˲Y:-(h-Jo[܂BB?dlЅW]Il}+ۙN$WҶ|B}Jq_g*[>тJQ(S -4D! ƚOptNwTҶɵEE?@#H2 IPH2H/ אo E>$qnoF ǚPE鄑dlE:j;ř#Hw$ۮqoQ#IQsIZT2"=QЪEj*[F!BhUSIۢBV5-UM)V5IdUSj ?ivUcU/OM^Jp@@ \ 2+ B<DBr3*YJ)T6࡯g=f#5f|,,|55WPHYiOKCi!0}`ȼUՍ*R2jVYu3b>zLD貥Au#.S} y͚.0t`#mbAq'th-PuجC=~b'}ηWk ~5s6+߿@bڱz/'ZXM/3'_Qm7GzxqgoE4b!gXq,h>jֈ#ƛw;D:}{lZ|:l 4%m)GbB$~IxYߒm _G|rSn2i#Ngyg @ Y?wx|(0j/_j?]hԬ{l;M>O?tiȃK:xW#G.4cmȃZ+wٚ:>~ޝ.9h5ezA4+髚~x.Nf;-q_t6 ]>G~Γw~=Q >~E۴GQ#//;[ }l~'g#]~7h7W@e9":xhds Mr6-r6d4:NcnGh%ȆknДo6qU[PG(Gn.pB" j áf}u 6|Np@@ h0D ->"ccIb` v~u|Dp*C 4:C |䑅Q|LJUZ`SЋ|i+O?8A'؏9:NhsV;\V, WRu ); HL9䟝8Հ9 9W*5^9gEric#^ ^<ȡF<舃q3 o#i4ؚUuo~gA9 ߳rd"*U\B4 Ϡَ}kF:>|+8BKm0f % d0J>`Z[hN9Yu|% &#?2Qf7_œS$AjAۜy9EȡP zbq%|Yd'tga6[60=0%2 yFp#L0)qj#,Ch#r=G.~)"8i 5˞A̩fO9G,q0Kp#Ԕ2iH-LjZ#7ÏݞBoͩoφ<8qZe.= sϳj8?;&?2xiSzw~.8f04g`O7qp@O.|ga?D7 A o,Dia6_#n_j|kLJ9P'SN]%H.>"A=e~j:~Bun#;VDDX/%I%6)^@4/2;–#)Hޯ$^$~h PUI5/ t&[l $V$Zɓh;BF,I$$v$} hL-5HlIzޝ{%A"IHD" $Q$$J~(A 9TYA"Kt-R:L-02}L3 &B)oH(uuIT>IF̐`} x* $s <CRYEْ\W~[P3,Z@DBz ~\܂"ItP/ǰ*-(T8(JP 1*hk6RHc&TO)% vrSjAX%o鄪MmPѪ*[:Y%׶eVq-PM*i[X|X !TYIB%JNrZ-P*‚p%cK'TW.J*aW2tBBX.m-P+[lUP\o8 `GlG%m:EdlnL!TҶ|BJƖOTLmPzX%} u*[dSDݼJ u+i[>d%c v1 XoYvA @#[I uv+[PC\ȕ-P7v+iw%c;өDMJږOY_65 `@%}'4`P܂B p(548|QX pJږgjTm9)c)5JiĘL! ET2ׄ"F{ O%cM(bUָJƚPS!qcV5qB8UI_ d E%Z%cM(bUm#F*kB#xpLņ*[lhjNVׄ"+kB)8YI_3Z3,Û-44DuyJƚkj!moJ-㸕5JQtp! 
dEL"iJښQs%c N pv%s yAJR@y%s JR|!mQJƶuhnW֔"4\RB(G)E*!yDI(*kJRJߡ`,DHF )myƥ`*k>rJOHS;-RBQ%c'ZJ"eS%m *[>!U!cUҶ҆BX%}'BdnlPVI ņDz-6$+JڶiHWX  ņ-.+'f%sK($-JJƖk4T-uk!cP&m!-dnf{F [l9ʅmk{9х-u!c:|-wmM(Z9HIy'֑$s |%)OrmQ !O2"m}b7$, m8s?Hҷ"=-*ҋ!H5$[BIwۛDs{ı&amifza$[FAsIk{ǹX$[THeպ؛-6)$JEmQQ-YDƖQTkb"Ҷ62"s(诓D:_TRLlHэ?i_` _#>￝g4e0^*#4ݡ.~-FV#k.8 #ta C[lFFf ~wA*MToJp3 ~-tyw'P>%r%Ĕe\Q.Uyյ7%=ʪ_0͌&zui6r.˷[x\u`Iۉ-Y%#cq1ҟ9;6.YwR#'kuR7gml =)DOkGQ RBT=05S!L*AVOqxWr FDı0 4%eM*íYp2 #ejʸ3g*sϰVvksbmSBB:Imӆߘx iI8ڬM:*$x=h6/mb?h"lY`WMꠢa8&=莴yV7}e?ĴKxִ4vP]ۮ}ju[G [Kdݲd{2T6|\nu&| y'xY9†Z_n큮0r69-yl&4O$vx;%iҡ&&l7*,ŻiӃ LgXw-=6k)ir!OpZ~y~1(WWn00dJ%/mz01_9$^26U]4F9qrl&w, ;tO@6LK N YʿInŸdžrj"?CCSK'&#[M?ٸ NHK{LQYR &+M,M>`ÉNY`\5e7OՋ`vF{uq@$RIh:pm_nq;@'Мt `8O5|g| {C|(s~C1 ho>Ǚ%)p`pxȤIc8Y^Cw-g8eM#xaWOTg]NDH?ѩQRq!Uw &N2wY*Dj팇/Mk>AۜᯠY`My)r =f9_;*eƹUv~[iу]H>c6xiXp[N0hуQr-xq>;IG[Fbl_!h5ǔ'm[mė1{[I|ii}ۣ9É~SVu( lOx{ zm5mu ȠZ{2\CIE\&,1Z{m°@ +H=| zJaiZLJRIA" ň*+K?32ղDzOh%`~X-'!}eO{|zˎa8C BWr XN{h{_Rc=F!+/v@Gd:Pkdg Id l,Y7M>f.=m~L|Yٹj@>)Zv:{IEo_ 3͊ "_ 3[ 3PfK W lΎr^ ३0.twvچҼ8S|*$8VbmQ)Ԓډ!WPhկNJ %o?:%^d^ߢҊ V";$(،%۴R EI;O+ $m}iHL47,-mWH'Wz67iZ$RW#)2b.k)b_(5Pk1"%吕H-lwŶkQtl>;&[Q FCA7TV}GԖAjSlEL*VĎ%Ѷ,v[R(@}w("+q(47(`>DFE[i'|Lo4z 60KoI{Eſ|w89AFyGx0E@â] Tow􍻌h0L@*D!#3Do" &!#蜄YDtWP»4$/ZMfA4OWD( .&ѡ bFp;f4aX]0[{D< @4n!hHÏ$zY5Q{;O)(ꛆJ_ek98 ӭvg9A8ʼn9A=8[j @"JC`Sh[O%-r@ N?@o.~~q :kD; %w$$$ kII"IAH;( q'&͚D7t! XI-pIE.Tp1K.Ip2؈8B>6BͧnPIҷ؈npKMmoڻ ^$J a"Q$JH aҒDM!L~ aDIT!L(+ [!LDWH#Bc<4BE›DIq!L(.w!ѓ(/~ rl5PfQKIrKmˢjS+;DBNDmQJV!}K':VȵmQeKJ\!cK'DEBQ:,dlDT DE; [:i!VҷtBJN[k-PM+i[l|] U a*W2tBuB{K'TOA%sh(JOQmPE0USI |*[>Q!.UҷC]JOU;b-2Ydn]u*?F-PǰAB8,QI[C5F쨤myC*ۦ9B{K(4$fZ*1p~&1S\)3?5١B8_TI["*kBNp&EVU2ׄ"f W%}M(b&1WDg5B8VI[d EѼJdnIB8MXI_Jd f c%}(bk(blV2gV#L $j!ǚQDk!} -dnI4[H_3-dEN'p!c(r9f ikF΅5440#,3م-*b; kJs-*b=f kJI4_Hۢ"f I4_H[S2s] HPH_S22#$AB6QH_S_rl?NGcI'R%:!G)dl[ZHRH_2|"8I$)oD~ ۞$2|"JABTHۂ"dQ-yUI i[PK"X!}'B|V%Э!K"Q]!m2_ iۦbBƖO(1-6BYR&زq!,dn E?H Zr&%!j-dl$ҷ#t-L=Hj~-6R;dn SN2碉NҶ"IƶumM(Z9H)y'֑$s9-HHJ\[Td?H-Hs {߹x  $c;ν\,-H$s`6 IPCf)Q=X(mifX`2"4H"ێBk{9 B [TH!s(°$;WR/.f&Tx':A43cs8dV\f~Ngb餑ܸ55Db"YUMW$N Lz?rҥz yvB$N$#0dAn;3y'l Lx"HT'7^' /epppG&fuqBOtN CNp`-; yx8[8'wu9m _NzX[JU~š Kgvxu"k a1y9 N7yH2saɍuQp>e9 p,N/Wʇ(:򑡷 GOXA6>Nzx9#td`/5 62GS Tz@PM6@}dBJZ +Y)Y:mųPo+jm*HV>"&V߼xṃ"(P#lbP b#7"K\8{GoQSL,*+޵ w\YY?m9+ YHĎG†]``g ۓN7}ywqڠ. 
fitA A4UN uGE _|Oss/vN=ƝTAt2qD ih:}B3[bxUMF󭡧eķ*XF?6xcyhy߇gІHX֠Uml}g@#ʶ<͗ח'i}?ڢ٦V'W6;̦H8ݠmꏅ7 [W_QhOGN6G:7vz?61hI<=9G55yӱ}}4[ [XNmhd|$S;]Wܿ7.gAO*oufldu0*5&G >"7&ϑc ~!iц4:Oq#x #Nh3ش9f_GF1`%G.n8N}>4ѐSr|7}uq*W͊CCI4X'Çud{JN,!xsp"P5/W1T%CW~աXS;>෼FdGABC07>rȺt~sWT9+48K%>9|'`I@A_:q|4 [prՠ"nfWF7>S0@rxM5)0}{'Őir)<)20QO߈9}4=6,[ ä7Ŷ#RZC+s{-Wߏ_ d7k]}>.V8j)<8ԛi=ODI\4Ǚ!- {)"{NB,=R%2?rE8OSA@49] u0YՋ`J#7#M?B0M{JL{F Nfrse|}P?Mhs{8xuc崼Dz?6x~+n^y1}?`~i|s *Gn/ iA\L*,ɫ\KE N^\BI=`pQ35,U9]Mz`y_(@U"//)a% ̏pP u~8(U?/I9z@U; LުnRh%C3=<椨b PUND 0&!zA66*:ArB=#^Za [QijD J'(.9t^:A-dX{:HMԍ:!xS6>uȁ>04z'$[ j9;u jmQbΨd׫wqBgHsHMH Z@f:A I 9;&F[ȧ#uvp$vB7H"bHo/=$-ZR:&UqulvI0)ٽeFGD hh>{q;t1}wҪ%-( A`j0f I٢qA1en#BPtc$ 1)dTf pց!:7N^|\H ?Dxe!@7!!9ZQy\prcPYل5Bj]bc!j:V  $5e + @h/B-yٔT+yA`9Dد䈀,Z22 K\u#v_\ P->$_X 4A9x 5DRH^oNH&-IE2FD2HJd8QILT2MC'p*&1R\7)45JP`O!d E AJ1TX{*Q5.bƪ&1Uǹ*kB3a5ٲ$?d E̱YJںm\%cM(bUҶМ_%s ™JPlb%sM(4Xg +kF\kF3pn?hFn1kZXs͘Y-s-44[BCspf1[\3 .Õ5)B8\I[3xd0X]\̮dnQ B8^I_S&dnQB8^I_S/C-*4l_ض JښR>BgP5MC%s:H"D%}M)BQɵ\;%HHJJ h[P2-opI"L!}'RMS$p!*dn{FHlT- aS!m H2|"tVI*mABO0,c- Z!s'B˖DzBOn.u-6BWb#~I$,mF [>$R0ҷ%d!s'BQDBvDžz%M"h!c5CqDBږPFdHH[H߶P2\3TAB[Hb#ą-6BDbBƶֵ\IږP:ض5Sȝ E?'$}:RdnQ !Oҷ"I-*R$dIƖP=@\lAw.IPyBEE0 $}K(s{3*!0~n8քW-ͬ&"c(q^L9Hqvc8/&!"}j6"2ce -*2 ZTK-*2 Z$%N%)8bMO-ql8-qyKs?'O_K ͉] o;Ya {ުyU{??{۷9X}aaӿÿs/V2N:o߳oe_/_Jy+6߉Uپڿ;7?M|77/Ͽ.?[4١wͿ5+o߉8\y3&-?}3{Ě݁ej'iJ6f\N3۲? 7Vry&;wyt>G$u^W}T|T;WY˃ҷxP(KG}~!_:14k}w_wL~i// :Wњ(_o~V0sQ[WLIʳA{xTi%A1eU$vAiyՉ"c^/o_dCII9.惂ăU8$^n#Ž{pi~?rbj6m]?:#CD8yqlKz^c!Ӧ|P( jVx|Px_xql[8o[}]?Y^v.~[~=IW,1@A+%3x@\_>_^kWiClX u^傊n10o$Y.((V(ZAʣ‹j;wZg\U_-@ \_%/xNlx+:\?mh$X߳>*H>;SW>*Iy^4/ Ku+ WW[]X ݋8}5^S^ռlBYmyAfޗ䃒GM( ˣGPGG˃ql7._sY_"߿IWk>E^䷃`gy滕ˣG=OߋGG5<*Iyw$fB>׭r4 Y֮=Fǹ,rG\w=TN{ĄiLs&=z7g׺ x2_ {WchGխgi k}Lm uK\|d@o\2zTm0u/7]n[3>MnC釓d=/zt6zי$nJX| ɷK1Nk| O}/^xݕ_wh6Wos'CN߫aÉ73:VăZ6ݎ'DxN| )~eS}@LWCUx -eɕ/fU>Ƈ8/xtE S]:IK]Oޠy['.US׍aG}>5!nw/}ǿޜ~d*`[n<4k(nX]vPArx\r/? qЫwn\^ 7X}ھ;{o|A_۰ō~גF26\5¦!/?_&[s8|XqQ # t˸z'Am_uSc^|~mx[,n_Æ|;n13f$7z/#/gSʣGw9ZLJ:;_h|dˇw|o,{Ӂ[pNN-k7G/]_o5 kbvSaֲ5l̀_Du뺠j=,^.VI:~ x˫5.5ƺ<*y<9!*`_d-XJ>09Dd"ۜ^^Yx|AX۬3UW2|k^yL Ǽ.׃~ WBgox#$O>SCܜ5VgMwy\eʲLu'G&Vk;Cr=o>-ߊuptu01+BXߺ5Mb}?6ka KbQ$Vk~:݆{=/b ,F\ou:Ć51݀ēՆoaM?l 8'!&i>A:_b\)Uw i_Nش/ǣI͎g!S#'u|m:mv\i&je獅mXo5l[AP^kX`n2S͑߭pAr2ym:νfK73]_Xe]/+Iܒ')I{_R|u6/VIL~C0Ʉu`>P8Y\V\@ZYoElvEW?UB3~0+:HIwß?Jxh),[ 0OT9m&yPx/,MΑ͛fLiAAѹTozoY<;黹Nc~C|yr-/~ݖlQUʋގsy{\L\7! 
nS=AB?obeVE:ɼx_dpsせ ėL4&FC"_t!"'҈!*5t}g]XY#QJlnJ;}َcT <ǚfn@{wݴU_S-"/7**amXMYwx~ODa5r?Lz?͇R\>9{PbE#طB-rݳ$w Q{VVeOXiVnTw}b}qXn$#XMz@>jgV} - ^*]7ûNݬ}IzH[*>}"/owèhyjI&*͑bVC 閻YЊ5zXw4TګE{p71Z}3R%|fs/xJ(z֪q/\9Kpk`x3\ޛA׍Fd17B.e|ײ`.7 9܂ٗ5,bלMXql@LVxD҆Q|S8j8:H+= ]ou6Qly/`k&>.: W~XlVAy6kY~d)Ϙ،l=/=XY~1TdxyREqCXe9utp1[G6e4|IqlLJg6~NQV><[cz/w;sǞ?wTF)゗_PtlƣzWy6x~)ޢ?5)zvOeYtEnwqԳ| A^Én@Qݩs7m]z`UEe7%5aϪd\vc` lAh~!!Lg ē _\,Sb%I.F.mfz^{*$mο;^ȱ2p2Z//bc@/ͯo?#O8Gr6 Τ3/_'8;Z*V!i\`Ǜz:/:PHĵ "& .D}r,ٚO0/_<:G3'>@lQz`Q6;wD<;E9ڢŠ%ҭ7㇭ hajZ\OO>EdGߌbr~Ow(iM_FX譊[D9VY|uG~!3Pmڧi DQ,r>:BY6nɂnm&t|KŲjWh_ e)ǭʽ܄Ɓ^Iq:9U9pufr^`X͔{ 3aχӁctEkr @LpA lcZaVdӝ G^6l90'l, lJ>=Njså1kÒAWXm*񑎰~Xvk2BXt>rڬ8[a#g ›5bG&]pc2f5 ¿_㎡WW.rQYun,IB1fw˱2Yu#G}am5^^ JH y7]7}a qgaqe S7 b6޹5>OQ^mVO {ʿ8[Y:|vSs߶[ar0oD6f -\XA a,YpIS=ÌO&2|;oggy0e/0Gn}NxT510'U Nf}d t:SY ^Nq WOpʳ?⿗S<h™~u,2ĭɲ0͛OJg k5`3Ddsp# +S8̘i%faqO=ɨ0ӗc?֎s,/ j` ɩӏ`UyoFAiG'C|n{ vO.E??њhI-U4xP'A4^|14dm#_&v\ȱ"5XÜo890̋+oI󼇠N0K?Ԝ,4kOifOn†`1,DǏ,c`EvMO&B`78~lyzda \ q?ɪōh)V=)o}ҪKfYJ ᜜ ĵTÔ,G[bKYtޔ,x[A>`ږ)Y,>,_8jM~AjRP5TJB1әtc&0ocq&ݑetEf|}N~ fa~tTP똅hGszÖqgih#ݔvx{몑LFvY[1mOMԊ,4s(c YPhVJ}OiYNv$m*&oy|U}5jjbrGgI_bo\$&hzcqR i/ixm8gvq2ˋd/4O!%[qr]h|U*hsBt bzi6Qy0GJYMuuFqs,?Qaw9NB o\z.irJȅ.I{ﷴp]iYH?;4)KWW'kV5YME-}ט{0?xD%P=XzZ^&>.rf"H7(SVDb=.m¦b( kyx+_V o F9B9V ag(Ƙ`ia;39.0 05yKfkƹް%U|n<\:G5pf?x~ ea?z&RQڏtT-M|F8md#TeVfms<Y< xg0 UnD.RBӰpBEi  Ӱ*Hc+O9#a cϓȺIA~d <ŵ$-#\MNjlV^8,>LR@|mǒe/NI/b~0/]~ t*zR9{QTPQAXzE;8:LraG>6x`6b7[a?Z8XsXqKol)=fߤJaؽL>c(R#wLO(0kyɸXdMR$.&iA3Rm NEEI^IX< h 6p0Ûqb c)"e\x8ʇ`Z)Z^{Vz <~ &_9+u.R9wLZMrR'|iX4U^xz} j25 WN>¸Bh7B)WN[PgÁnre|rƣ#V&Mm[N*r[99cn+yp[ P$Vbv+T k]يRdRIz\MV ZQ@^+!Dي$kh+ablmV8a.+tK,ru:[F @aATQu<|1#厛+t77[6_!DZa%\IPZIY~a&J˃V^ʬK1# h Y!4w2~;gcPI Hq= qH1QVQ]>kvީRe+'6K1Vˬ|W?v|dZ/B8ʽp `V>&aVȬ.r]G0+꬘-~apV,Yi4]R'X}Q r`VO(X VJ@4j}feSU/jC^jliŲ3#J%* ʸ*]OMS#kˑwNXf!XhEGhY@+ hEjK |Z Zhŵ>*|V HhJ䅼ĥV|"|?JH+DI\:v2٬rWKisZ5_BVZ='祉Z(ZZ>ke\x,%cqZ+cw~eϞ$pg왿 Bʶɯ|[- %%b+ YlZb+3`+rr[(̀d]WkQP+ jťR+>ˠVZ!Gj ]PZXi4MEZyebҊpҊ@J+V*Wie_ ꞿRiJcV"ҊSKqj,HZ"ҁJKV` hD}n]he\,&9{B׏s j-H+*o1@@0,]{ bf&VXШE)9K2IsZb5JK[*`~ Hgi\7 2~6[fZheξ AYQPY$MemMwnʗf" eV0<\r cf;k^̊;2+VX¬#҂*zPV,TY  _KeEMeevy{)+璧5ˋiRMee?x [¬LŬXbVQ2+ ʉ$rkTYz5AV,w9lYS%2қsde0Q Q5V^HM1$יMcXy1VX9nr_^ X1^0VFt=ʶftRc+NX$Fces.0Y1^dOƚ(ȥ VdAVD0V\>$FcƊ' cO_"?Q%ʬ2~B+#Hy++|b[cEPcE@c8s`\WcX9XY`i `02bX.Xi\*Ɗ+dϔX9|+h8Xygy +/cŹ ˉJ5V܄X),B܋X9PK V!VXyRnVYxVY͠XXeEe P\gʸEY*Y/gń.FUJ>:+VtVz Yy]VJzZFhЊQB+(Њ_ ~.JQ`4V]&w|^ʶ$T 23VZVZi96IVhe?V-eAT V %qXZ;ҥB+ $B+[h܄h%0Њ6LqNRH܆VrEY)29AV4DVY1TeEyet/F4aV57 %0+Oj͋Y&GU YeerSe 'ʳl:k<]QV^ HJrɡYaiFcdRZc (QYYXqj,AQV:Y&ʦXg_QVSG&+klX_ + YaE"+O^2\cT)FY:SeE]$J*+WQVHeeh-(+*,ࠬX_e1ee0y+|ޡফ$Wd.@FAV^~Ox!+F0Vscf!c XY5VĬ0V:X1V΅*vr`p[}ʝ,zF^b3cvNds✀|+DYܫʊUVnsNHe垵UVYWeeە?㓜Sg—7 DYSJ3eVV]0+Sh~2n3yͬvUfeVY֚̊A1?`Vbb+&k0?3+GdV0E%V6sIb0Ċ$7ee b881vPVDYkJkQVZb ʊ1ndAeaV\y1+gˬ7!b Qyif2+k~ʙ2+VY9qVk.9:+PRfʸ.Y¬\W¬X*WfA@ qV^ JkVPZI$Pg=Ίi&/g gY!YgK1TpV8+#hnYsˬ9Mfeܷf JkMuVNqV.z YTkF^ʊ,ʊʊV1o `J)+X1J2>+xFXgVƽV7bI&yTX1e_?ʓB;?`r"CPcm4VʥlY>эX6ܘ +k5ʃ_-rOAV4*¿#@V+!\dd>3,r&f+b<38TA5V Yyemһce}Udn*AV,Xi~{/Xq$i[+c4cE(Kcc=͌+-Xپ4XyN)Ɗsjq-Ã<\<X2._cї\YR*XiƊ+JAVzYBdƁ"+ujI0VjX1tVc+NVEV/@V:$-r!YiE _EV?@V(?" 
/BYYkCY%}fE(_fzf3; i5Vn-+-YA)(FVȊY)l#|C6Xyצckc5VMc%IL%V*X*_6bw{!V:X)b3+`@WkR+|#I.2+c`10+#Z$w]Q3`VYyVAbi@)<+pYvZ5X% h݁Z1l!֊JVE鹏*[[G{qb4"rߘ8X+h+&UC["x@R m[?W[!CmfLmTj+V^_'J#qܷ0 =5V:F[gAX}2P??ڊP#JQib #ȅފ0J/`9I^|+EHWƊT^UcgWm)?k+r)T\ҪM%U*ۈ+6Jr\9lJDWs̕Y%Rպ zyq EȕU$WV(rj+f/\ARq&NJ+5# Wg |qWFHm UPt+W^C4kP\9ygLmĕF+pWF\iCreSI}crO"|+:JWAW,]1Rtz?ѕkS,z]qMt%n+pqn]\~\{}̕\+Tq ņ<~(Wb\p+-\i(⮂+t+V ⋮zI1WZ2+_+MBK+0Wثrnx*1W^g@̕\\Y\y毌Ҟ1W\b>抬 ]tEmpDWd"JO Еk]JS]ivЕS}2MV+Hԕo'<݌$Sԕ碮ꊫTUW< TWTWD]q9n+-G]y)"+eB}\e=Yu%.Fm zAH<S /rŜb䊽!W\1rE Pr %W r%WlW_s{J+ ݞW.A\J+,s+TWP+|D\%da+V\9+ce<E{t̕ETٳw2 rQWſ"6Ǫ+ǝ!vuf eW*xdWYwLwBljbB]IHx ԩP3 2BlQM`W+-]H]{-U+dtz鮜ׯ2QV⮐'ٔ+qWz ʗCB]ɱR5tW<{Ċy,T)㮌ËwweW.®PvZns|kD]\+}BOuݪ+D®v r}9,qWNJQMٕs~+<$zK41Mveĺ 2FY+sfW`WzOv7Jb+g@מ8 S+^;+ 61W4$Wn++ }%W%W %WHZ\2\yȝliO䖋_y$WVr9]rCwɕIrYkB8\InJ-ʊ+iW7D\q2V #8U\aQPqqDOY+a4Wsi͕K\]B|ik/B4Br5jɕqOr;/r3ʨV 2 O+lCreC\ \Xrzxɕ5rJW\"qe s#R} q%+'YV9VڐmEڊ XVjs8B[imeTV[N1ڊl其D[ڊIlj+:U[iImFV:ڊv+mJVVT[14 [[)8rK\P/Bkt>W\p52ʚLEW^IQVr~DYp"XKbg8q˙ 2}u\MJ @WNw Wʽ\5Rʖ@9+LpRRQ+4y\DʾqG\q%hMW })\؈+>pW*D#ҍ*Qŕ+vXT\[r9F\qRq$ŕq)7kx["y+Q0^ RJeU,ˆ+cP"u)WhW_ Kkv#AW]a띿Wԕ<uQ-JׅTWHR]`艨+0*R. ؊\g+[L+4\yr@", \S͕sAψ2teAB@W' E;UW W++dWs?ꊥUW4D](wKEse">/C#"bJEW2ԁ]9QWXTtEt䄢+׬Y9AW <% 80Wl?һ芕+ tDRMuiJ+7l&슐+슝_ 뼲+BTW AWzv]hAW^"DЕj0W>͙4dg vƖT V\-TIq boQ@[/dĘ>)RmRnI 7r+.V,-nr+?ɭܩ/VX# 9j+Jb br|U*ZZ19w=֊e]ɞ =bZZipk%Jɏ"tR4Nke?TS>ޥl"srn9_skEYH LkIPZ)kŪ:0X+MZhkEbRkEk۰VƠdfaEke\SPZ6@`KTkZWj^ԊiP+ Be3oV%rRs M2؟@+LVZ 7QWTxZ!{WY'bYh|UB+<묘CbQ]J BX+/kuV0=VƝe2t?ZiZ+77֊5P+-ZZA>(B+LR+R+/Gj6~T)xK5J+;g#.] 2!i%zVnH-wRiROk.V@4!ҶVN(P+4Z3VQF@dg^ClT."$"Db[P+/aR+MZy2H+eK"XNiGYN19c@e%jŲR+G%pZq+b jmHV"ӭ'JV\UZ1V6ԏH+*XbkEX+fZ1BkB[犭|S>bFk1X+VެׅON aVuld|Z+V@ZnZiOnCjFdJ+-zK+9H+DZa%xEZi)&{˞#bESZ$Y:ᬼ8+^ʕ#{A+[gMheL=³ZngdVYaJdVYLY8#l.Jl>qVzYYi J%/1_8+c.7/$ΊjuV)8+wuVY٩YY@+ۚhEYhQH+B? J+eA"쫴BieIRҊ!8AVieW)V%JV+^i{pGZi Ҋ~nJ;_‰1YoH+-VAZyiuZysCՑVArީVZIUiEiJ'sJ+gOj>TR+j~R+J+[2V}.FRI 5H+hWZqBDVsVDieLI+K}Ƀi呥|؍ЩBmEEiTYieEܿ~eZy+VhKZQ2AZIDZi2JǧJ+9V^NdJVy91DX+jjTuj]ԡVOR+~LBjJ8 ajq‚ZSBZiY'<-X"x4b1ȑg}^*+ +ϚQXVf+ r YWdRu +F0V::XUKocedJV `pXJ+m1V^(H+,!U&JxD<%Sfr(+6Ĭ2J' fΰ2+aVlbVx.Ȭ|7ԍ0+-.YYͬWYeVYceeΉʱ"PDYeEc ^Xѫr DYɊKY!Jl GKfj0ʳ ' feO\V fcwg%+:+ů^%qV7ZhhVVHLVGfH+- xZq.Wh h DH+MXiAkVpnrRi֕ `J+/p$5Cj+ dBZhe|Ebvo 'J0+¬Rg5E8+v {9+:+XpV:?Y1?Jg+yf7?+ʬ܌`VlB 2n?[xQiɬ2ìxX.ҀJ1B0V tޚ1V^HgQ%uT'JM+ObblQU61oݮaDߧuȚ‘ƊbZ>Xپ71VXʘ}r;7Ɗk83YlY!4$[dUaq.ALeEd|BkǺAYLe (+yJYI̊dVFmkn=?P¬ԭr4TRKytDgeO:+tY gb:+_gQ̊%meVq6$C[i j+ͩP/”Glb+4Cxa+ [1؊ħk`+7ΕAɵڊeh+V>h+ptU[y!h+JKj+ҙh+gA[ kȺ֊E_Z+ugbZ|ju%ZWJZiiF23JV&UXAV^KI`hEY%8 D3Br( Bi21 +<φ +2JI +DVX~#-Ɗs4 A2EVږeeTj䌲 l$Xz@(T@Pbz'Jì\\IfrYQZi * b(J@+'[gECg%7eVYaਲ਼BtМG r*/fe~̊"Yg Vs:+V:+Yq‚ m tVƾgwبTgl:+'l ΊlJْ=tVUUgE'yP"8+:+P:+-Z9?nY.~&B+ϥNh{jT4 )B+9nt'r(+ h3ZVjTZCZ9.ĞH+zRi%DZ8JaQJ=y6ZgBbG/){rh"Z%)BQ-JJAJ4$BJ?[jyJxAXTjAoFR+Z+(Z+0R+̔V:" 2n&2毂Z9Nje<fLjzR+4zY+ܐVbԈX+URV^ HVV^<cha< Wܴh0ҢX6Z@5Vn\X+d-C#B\7J=R+ckgye3 l ଼Y׾IpVT4uVY15 g '?n´HggeY96nQ|}J%;k$ʺ$/XffQ/f%rQV蹃!K]&_dFYٶ\"+_s +5V AAaE,aWS@XaVa V$V> \!VXyYVE~+\+FgVAXiXXQXy5+!a"%VI+&i< /b%*By" WO%ŠK +=+"߬uHOjŞR+$aHfVb񕓚T뗵Bʭ2kKc4ykucr~X+a, [ [ivh[@lb+fЕXk&Z98VB[IjR+k VL\*|Aj' KbԊ Vl B8 º@X.bQԊafIRMkZ+OZ⬵R'ʳnok%gWX+-Z3#VŨ=WNriBBZg =tR+XJw V8jtVVNg5X+ջVZq5TkesVhVk<Vj7`.ZlYl5_m4VjezxO?FkC\kR+jR+zHZ9<P+^,iUJ+rX!c!ʋ6AZYiE'"Њ> (y {N|Vi 10Z%HLiYr9< p X&rY(YΊ&{7묔BJ7w)B+~@+Y+ҡ~ ZTVlSee?^PV8ZqVVh%ڎҊVZ1k{b+bKlL;$OlŦ5/mhrV7b+WF pfx@[aJRo'̍oꭸƇʓH??*ފi[iٵފ(Jz+$Vc";&-z+z+o ފd#w[to"p yV|#܊=ʭ_n[/܊|r+Oqp+x˭0[yW­< AԖp+wyuJ­Vn8Po\_y+܁Vjڈ2V"9?í4RCnee' V|Nɭ,K Gm 3VkܳŦJ`+VFpߎrMx؊43o{‚ /og[v=0ފB?ފYV< 󝧷2mN+@\A\_y T6WFG$%W +c_ K+^+v\1[ +q!Ẁa7?⊡!Wsu+/peP\H X"J +u>"t q~W,p+<Ŏ \Vp+sW*\yyH+)Ŝ9+~xŕq!ݩWqyኸR 
4+{+3cc+H8)F\yb+sC>ZWaFWn+ Xx+τc~JXp+{ʭ8[1Hne !Xo?Ӻ'b+'48p`+l%{VTYV+[iJlJ-8JJ+\bJ+K+VVF+>G?JE7mM! ?W}?v?_}hʹ .~+-=gV"-c7-ۇd}7w̸ˆ0q\ѲCWe7j4ZFehyc;@ZFd 2'0](-O  +LFia;5 (he^6302(#AZƏeAZ#3_AZF EqfJLC 2vi35 ҲCiH.QZF%Gc (-O/[242YW6Le(:Ps qZQ ze\'AY&2K,0-cGLXeô8Rq u&2>ô=2 㮇c Ӳ&L˶0-4˦aZ0-f2q42tZ:iɝN˶i{8->Qϋ(N˸g3Nx((qZ-i 4TѲͼI 2e#Z2We|μ)'@8ZF5 s?h 2;dZg-Fe"yMqBvh _GLY߲(-??JʖWX ecAZI 2 cy'u(QZF=NNe`Z<F>aZX4~F1j 2@se<ɴôvZƆ qZuZoeFVwQVei3|VXis|P-yy*2g{nT8R9.Za|P-㱚#ej-3C 㱺X-w"̘¶3Zjxe1U&z÷l9Z!|`-S>4 \NGxy Yee͎1qeJqx}1KZwD?&2Nky2Le ;sk@.< 7kyV63|Ww^wa!\xV'\n 2T'ZX-kKõnZ:kïZư2[:ly0Ⳝ8" ke-".#bo`I$CȖmǓ2.3P[` @&b;[X.JĖ1^WGB |-ctr%Sl;pV >pIĖ#bxE-|e-oM薬ndˤ[]| 21bLe< e9ny[23y-#f-Ow;g6#'F5yq e9[nydC<#lt˸s Xw2"+l-K-2[FnoD)o!2Q[-cX 2іJA[\m14&2'u-#=%/i+25[Al12&@@bKe\w-0hK-)hxiykdD[mi,I-l1Oe۹Ulъ( zl7;[,}c<9y;KD[2[F{[M:8L[^-s#X,ZƁȾ2Dx-pᵘ2.Le^}+!͗|l橀ªLebldKk2[dly`s7r^|fA^z-!]e;7؞x-X̤Ҡ:`K㓀-]+l3>W%[:DleCP]62nUpdYK-`Kۀ-.k<ҹs%2'\Z|ԝ`31̉b ^˘p-<5DM)ޒ`1XgceFWeT;mQ'VKX-&SjxX-e.ْªK2_ťn'$h4X-FLe5=;&I Zj-]EkyXRWEZ8P!e-㮙Nw3^Xo5+-]lieϙU;#{̖'r-\2nllռbNҥ̖gbIx7 +@^9iNv[^9/%S^YX"Sye ++l栝قrp)Gt+j*\(%+bmy iprbWj|A Rzer ^yK+Lz~8B^9 rWU^^ye'oyYy%+/ ZxCJ)+# Xn-J  WvPx[_hrx2+7~+e5W6+W^39KWV mW}e=Z0䬾BTP}e~+Z&+o}e_$Ukf`YbN]vhc+ĎbV):^1+`{ҤW_yE%b)&>".W/ RbGjdu4$X^ إWՐ^1i eI+cL5 X1镑6e+b<&ٓìʂTBLXrT"pg@_@hW|n +"+ï4_~eр_PKT~%+U>^@W_I[KWqWe2[H,^;6^a}"XwGܷ]b}E+<쿰+ 0]Y]qy&]MMvU+֬ɮl+,fXz3ٕ+0dW(T]iuea濗ӽVuE[MuE+!n-+7uEʌ]sÚ抉5W><4WEjd! !+ht\Yw,̕O5W4WhT\qbb̕qF0͕5E'm%/WFbPt+#*Eu [h|VQmeY^V>U[IYVFf|`+䋭[!؊c!:3`+ROaGVRX!{\as)+ʧ V Xyx^`E]`H&'bvBVSk `mVXX1-r PVdQөxUWW܎*cz5U[U+=_Ÿ«|5ҥ6y*uUl4W +OV_ M~>z#PBa(ŠyyV WFa*Uae@,` y+>c ,)V\PX1a幞VxUU.*TWWΫRd_EU_H*PૌU *_f*cK*"W?%*~F_eW!I_*zW*%}UDWWWpq,[{5nО Am{`ihlR_:Hlz*^[絴 EΥUHUpU_ʼ U]e5.3Z!cuUo%'J)l *_BCu*UJAL9hI'gThJXZ`S[EI[E^[ ɒJ &lA/9ThdFbřϋX8!BL\be"M#Vh-BE(;@^b%I5V?ZYceSͻƊ+:+-X^GbQ1V,XaYNcDC5VskH2V᮱BV\*"++z FEVAVڊ\_L SB>n2+Oa/}rrì =aV^" ʺ%Yfe^_f2+Dʬ-^f6N2+' ̊ :JH"BNj Lhh}ZVhh!}by VvZE#z#`NVZ񉠴OVMOheZЊB+[ XNJr+d )VV_JVZ2M_hIVheK5DB+,uVOY YA/ɼp8+(uVίKBY9w0 :+ipVXY1Uf1'=bʬX'rФ]fӸ̊VLo(*+SY1HYeeQVVw;ʊ7>78Yn/dEdJi +3DV6*EVzȃYvY9.|QJV"+mAVNڐd m Y0V& Y,+X$EcŸʋ8!=Ɗ dV})RIf%32+M;ì^&Rg8+EGqV^ G篳cYgu:+s=4YaRg /GZ/gV4V)VF\'JJ:s͙B3:+ XvUiEzeB+Z%ʳ87l?J EV$DVhPPd:Kz8(+UVw\RVxkUYѓSYYy8G;X ˬfe}M*mfeС쀿Re]I) hUV(bI^T*+V,0ZJ//T7ЊB+\Њ`4p-&RhDVZNh[c /KBB+JJ+ 1F*F!t2fҊJ+"bie翗@Eh&*B+B̡Z)"ѮRi,J+#hni]2P%ǡ+c9XV~Za*$b_hEM qVsgXh !J (WZүAcZұ(ˋ:*Z+2qZ+ Z+NAVB֊/ly؊mcBu5?J-4?p+p+{q+VX/B;gr+xij+ƌVVNSj+VVb ڊ&nEYn3sp+<ƚن܊܊!#뭸.sP\oz+>x+gimEMmE\mA[K[!lBsj+/% yC[g^#=h?Ole&:j@ZʙKs寂JUlŞ`+c?֊,֊hhњٓwXkeAVy k|VȠVZGi%sB+6ZɳQgRpD:+#/f2+ dV ТJs uVlp_gEgVLqDb[qDbIh`MlT!2] ,?RY/eŖ*+U`TVFPV}WY@ee} I}MwUXf *V,UQ,8EtgPeV`]O\Au!\eQEIP kUuNWŜ];eꪴW4}]N2*]uU*%HUsQʘI;B"<_!XZa+ڀU:RVKV^YɆ Uƌ(IhXCZer *_6VaT*l**{U(FVҨ8UV*^u*C DWW0tU]WHx\Ur|Ax+&UdU+UOyQ;ɫWӞJC*Otduu=K*/] AW9xgWI duUD] UQ]*'J]H]u'5UUUwWt'=7 )+нH\7*e*^Uf(|ΔQИ iU4UPd,r2sXXSX9[c6+O#f-/ ,DeeUe=Ȋ "+6J)r\AVLaY21+4ܩROEavsq +X?JA7 Ca* +4/b+FR%V<)ЦGat-Š#R*,Š ++$V\)˧XX{]b%]J8eXMMXC!V|kiiʇx3cw5VDe5V BX9ԥĊÁ+Hg+؇@aE kR=+ܟFV]dEV,B"+˖ Rd𕃬Υ +E2Lof06-b=̊dVF5D8 *+. eŚBY)re?eEzVeEh D J,mj,Li"+t*rHȞRj dE'dEնʊ>̊̊Ɯ̊s#$Bk2+EY1& bN'2+¬Qfŕ!'aVYw0+.Y9o><:K~@e(ʒ$B+@+,Y!∳e?gn8Ļ$)ݛ8+]rVΊL Y1(wwB[J+))LVZ9BVl$^2傘AZ1r~9ՑVv!ԊҲ'J|h+諭ځ`+Yp=V|d8[~*h֊ זB`VڵV%K;ٞ9X+V6j$Mj#FR+KZQRZ WiJ+fo)]tVAqZB!BLсVsX@+)bDÅV"#bs_h%JB+Z DZ1ShV|P+2*VSZEj4fxZMs#;*llNiŲ$KސVjLje/BtV'[j%K{M2#mQ t˄VkX+} ʖb+滊@`+-2sV,Q[؊M|VC[)P jj+,?3ԊR+{V^jNZyM;MvZiAMóeORZ+KlVrZ*Ij%V^TZy*B+N Z"B7QV]hB+,zZa! 
ꗴr+ X菴Z9h#@+Vg :Z4ZZ,1$V;VN_shQlhvB+šZ!!bOpR hEa hEAYhe\9VB+ Z)}dUx)-YGx:+cbbgŞ8+pZѫ y:+>YYcb)%(#gilbi rL3ʊ*+C+&gAY!EJS\X9XHb8\BK2K$V^[v]%bQ^;X'vIXSBB +~jlUWRB' L+޿XX!ݨƊj+Gf+u&EVd$ Mq%V/",z%+dUX! j{@X!fTaŖ +X+&XpnXX9Q" /a% +6ԖX(Tb<'+/b&J8*X-qq+dTXY(CX6+:+bf"hV (+D+#U"XBXPb%_X8CU'ߋW!ū|U[^W, '"rݩӑWO|mGWUcJBUSP om*#;3X["*6lCEV)Ё¢Mm ǥU*VYWV9$QR| Qfj*o_e@TYe-dU_+7U[W)*=[eMVq"#ꪶ8׼G*VVaބ])FBEFmkK!Jl*<^JNZq.7k @ 3a `P` p\ކ)\kU`uU,iU1Ṯ ٶuUhPWt]qUhYWJ1\X;H+qUUU]V}UhUQXEnYX6U#TV![Y%|RX*JdU,]U"*TUYUUT7UU|hh=VU!\RU5g*F`U{*P*P0 V6eU*.FVr(`U_W;g )8gDJWK`** ؂rm*itUX~e9eMd?+܄XU@\PDjTPY3lfkN*cSsU!zR_SEXS~њ*ęI*$'TN L*ITL#jjPQSG u5Un"n*ʌ*[*ʸT1.:jIjͭ2rS~ʗsSŵ M>M*6TT/B@M|)}TR:*4#TysLt /!NSR%%D*v-! ْTJTp[R"/B *X1UֹWS˶*v4U>0U }itRbZS`ն RM9|M*fjTq~b(jxi,jx- UUb UyoTJtIRRA#Gd*?ʳU K TS'˯'\Rf$ULwTn^2Isr*J-VReT+W/S*U)!BuY **PBU4fU;5UUY3PUyꪪbIh1%ʕ7bq4Jkj,*₪<,G=UGz|*ƔLTQN PUܣUY~2TϕV(T91*;zA{**~;ANoa " is*dK@Y'Ao@#PUtUpTBQ=tK9ZɩtPN O)\u݂݊[j .슻'\\8X9!QXHY며Sa='k* UHcjPŵmE@* - -0RT{TaZO =z*>T"J{ނ*BUF R-@#"Ѡʎ,*GTY.De~D+.rȹ ߊ C`I?/R]HUU2hQ%@D7FTaAHTq3AU*:. /ZPe bEJv@hWMV"VP[XeEhX`+U>tR* FXv]Uɪl|ڬ4%NͪʚUBVeBVG fͪ 3U~HWgef/Y eSYeϊeB6T*Εʹ#-UFkI*UV hMeAV9U0ʚUqeMUe[VYYUrU' ne*tVA}V*GU2iYݲUnXڒU(x*AzU.~"J߯*(,"r2|i[ŏWQ.WѴWDWq-U;*QU\\EQ\Zqq.q*֓«t풼ʞ=^ CrR}M*JW鍦Uv=*J*mU6X1a m`R!TVq-V>HX{)¶+D_ɑ3g1V^_Yc%+ XjUa titcd&Vn+֌CY/~+TImbd&VSsX#BXɯXXq2s~^ʊ(+TbF뉲„̊C(YlYJQo~謜ggPΊ8+:B;8+d_r/KX r}謰jΊ1ěY&Y]gtYIª Os{̓eE!=ʕ|Wf|MKuVFZƷB}$Ya/Y::+kRCYm\4V>VVv#=+G`%OۃBL*>Uj_eUػ}WylQ+QV> I&bů޸ jI5բbs{(JJoɀ2 /EV򬑕uAVwȊk&"+^RVI1+DPV(0~!+: +lE!$DEYQod_dÜd +4 (,dY6 +#jȊw#+4K+n2VW+kyN1V5V~"Zc>+Xui4\V_ͬQife?jXmzc#o}{]C?m?-G82Z{i9F=A%$ҙ# 򄂴| -@.(2 kPZmr\ #u2݄i9FʹM1-H +-ZJ˼,aOJic$m2?J1*?8JQoUe~ʹ#W;`,rWYMRP|㜝 7; #]?>II-(w{RR\TJj9FqC.gI-U?J-(]H#(eދqDJjs[jצ &"!RQ-Jo5]m7cyoM>Ev&?yFcT1r\U#2*wHQ-cm/I%a}K-c69#xK-(aʩ(ޘ*jKZ~?Jâco9'k*G f|a~cð̦_yUB2J*jܣٮ*6VIO5=T˼y-K5:P- RTXE̟ys70/xS-YRlU%T(P-HLQ-(wMFj? 'c}I-ǨH-5{5G( rB`.30_SH-(~-R1kçr=˞6}Kj:EZQ>6QRǔ2A,%0OI-ƌSR&b2+%1PEI-uW2̜)2cTUqA%6~ZZfk*evhJj9)e~VLZf/evJjg]ղoZ(BS"R1e2#bPxe@E:P-A[0,307"j#-GflavJQ)j9FhlZWlfZfgS %Qb kc ^W`-5 2C }$Z#/ؼc\Kŵ̶]\j!&Jl/,[^Oވ-'ՐM2Ukmy2sLZӊzGkyph-5خ[5KjےZc@,Z˫2o@!52OT]ay kZY z{y[-y(帖μ)72_~va-sԕ9١P)eEkk/\Zk ˍ2 /?Z'qpDky_^6MkcOkϨO5W̐ kÄ4Z)RMb[kJ wvq-qX9S^ė)M\BkZf/y,>̓~^cLZ:E;ωrb=TZ^!h-sDb ցFOZf)/,QV>զLj]e6SX-sy[-3Ti룋jyS-wP-9ȭPT 9rO2,Z~{42'Z!oR-5Tב#]JC/e6Z٩NV!V>iGZfT5lZJ -5Pno0;uW¡x(̲Wޯ b̓s寮j٧EY-z˓[ZmսedR-sQjyqB̏ /3c|ϜZ4:h-]kkܕk0kW编_bHSY5ӽ9zX˜TTa2(jHa-1Z|2lr}9R9sgO(X-3`P-sV}d3T˼RkfR 28;L+\PL˻R#,{5aZk>yZ,FҤ9LˌQ2[잿:*r"-y]HeޕU>U9Fբ'%'%i9F_[1H Dy(e>̯S8-2_SNˌp-[ TrZF4 4N]0=l]aZ核NTZ^0-sU'S]oܻQZ3_qiyAZf/d9DiykEiy-i R,ܚՉiX.{ g:-;irZfKQN˼t9TÞ©~!JK G]lFi&qEi=f$i3߯r*Xa <⸜0e61''w])-r%Ruu ++: -5M#VRTF1-l+ :e>IG2wR"J<)e: W$|e~VZ^(-G(r J|P})-`J|]flkyL˼_L+|ۉ2cGi&rex@-3PҢ`Y>wE."_Z^g1g{V ۀUr~Z^C@-'킍ej`NNI-Q]W 0TlMcjp.w,*eތpC?9=js}S-FT)eVE7WevKl )ֳV|lOeP+h@Q-uJʽxU k׹v4İB2pZ^QP-[j׆KB:P-W3'Ø-kve^;AtPTky'TkFeٴZ^P-\ﶜZ)P䖥p i e jO\%GA-\Y*uo>Mt1GA-š ?5WhfR@-sG=)^7R:78шme)gDԘ>Z V ;N˼+#1N1 Gi_zC-LjySNUzl9̌2Nm9Fe9lGbU{N]9-X7)Vc[rZףNi6NK/ϊ!r&\rZf˪B8-&Ri+{E|htNKrZfϘ_u%8'L1fPi7P`bZfLʥ"Xcy1-se;M1-=$i镬0-rVf 2'כiO1ŴVhpFK0Zz~2o7<&%c(gA-evH[: &F WtZp*V "C/'F˜EXz>,G6z9k^CAc-yWٲ hW. 
-Rui<>zaZf9*LKǗ`Z:~Rh*Or1-sΑbq$Tl;_ո]-3dc:- 2'yVm80LeU {{ i\BZf^AZj;Gj،JAZ3%MٺỶF0BZ*ߦ/i\WHLK !W֬XԚjy ŵ})e0W8g(uFt~i 8-8-_e@&0NK/fP*GL\DST~%Q 9T=^M)-g5(*(-5}+-eN@Yt'g^i9_RL˸?hOX%et2!F28yցbq{G242Z:1Zz ђ%(Jh1be^Z: 2PNZ*\QoS@ˌ~F)evih @kneqZ:hQ h9 _WeZ?t *@<5h 2J{jOhh9Bq6)>f|,GcYSR(WSYfqGlIN?>|>Oogցǃ(e;g.*Kt "%ҩ,~l9ʇ23xM)KY^,sVR6;2'Kv`Y3b[Y^,پG!Y: Ru hbY^#,e>Rre&T,eު52Ė#)bc, ,aWR=ur򚜄f$/OQ>Ff@ kFEfA`kHA2Kgf YmYIԇ,{#8ken8Y>r7hN)hyZ@*nʞ)+t)BX&eX-x=wXIZgeY2Yej8i,] 2(wyR=E8|$2G-O'S>ߣY:uFgt%:˼b2rYzI߅G,0Z?bIԞtXYD5G<"889pR%8ˌ1'm27jo}f}f,.h*Y4 _9f=ݯh9,g2#I0O=ȤYd dI,jh^fRhfyEfy=BԀ^S4zfyjfm;͙*YȕŏJ>YI͏R3Bj[Vj嶾ɢe>@i%̱Hܜ>jr$uTѝ: :BK t:B\HFnSG|B˘?bVųXbmmpdgA3\Kʨ$򡳼t::Kı萣t:AY8KWZb=CYa"YfftctB:6K-ՁTQm'T4˼H90er'ǟ .eU.*}*N9 }3TVɫr|*0[x:UDVyNRYS Vi4X%c lTXӰMKVX\XX9*MGVN*M0!h4"2:5U^J_h\ULVVN e9*d⵬xVY{evUdtd"~idJ-eUZV9} .]*6BּeV*DVOV&Z[2Jp N\UnrpG;*y* MUv #VF'KG nrVڔX.<+ H4“TauV|F+ &V( UXYbώb K X/?3ʰU.*׎4Bw*L/_%?_E~Wq$U`eR XYn'{V*kbe_;BWcQMtҷĊY+$VlĊ=6r@obED bae Kp"VΛ=1V2E + +k|bRldˆ, +M388&WAV>/dex!T ϖgd++AV%AVV*TVXkeQV' bRV>lέ%8Jq)+ ,4KA+@,-" +` X 2>iCC+D-Z9Q>ߩJ+㊃r%H+*Kn3U̖V(ZSZqLrө+Jc1H+%i%VVhE(Ί:+xn⬌*2+DdV|ʬȬx>QVȊJL+VV"ʽy$Š +-z*+Iha>x '9:-ĊM%6|VWm_ʘN`e7Sց~'+|U#Mv*̻y׀Wi^~^ʮ<^d$," |K^E0@_E\`gVf X!0TX9VVVXh`<6V b84lceNc2j@2nȴ-cCcy0V6\w+ 4ɞX"!bM +g 9tPV]琼MͫD b*7M^]*'&y*V}@@Wy *Z/*t%*E*fXX]ER]E M]B y<<WŀWi& >`U:#*XAXwĕ*>UUW}*>UWQkWYyJUxg*{g>}3VHl`ť-c M7ŸLJ_+5QxV 8VX鯇BfR +-P台@Ċa* XYn-++c8YQBodKX]Z<V*na1  !lUWB_倩W1_7YW1WC_e}2W!S}h\_eˬJ^eCۖW5ʫ8vW!y-M/^%>3!*TĴbQ**UxAh;MJ Jm[N* ȪdݳY$Y&}ͪlV SfU50 o**}aUlVŒHYKUi*b*'7>V8Ysê4@[VE V䂣q@Te{`DUEU;*;&k(i#*'Bn*AUTA@UHUPT;HTE TH7M1MMJDUƣTyp*D_TٹyUi%ʝlFUh@UskTELT U*L<7 A*8zU9?pU ]h @!88UAnWQV~*:*UxiK [V~>*s*hhX&* VbZi[VYeDӲUؓNYent _ZEmCťl*UDմUt=Wa/]% /]eE@WJ*gj\ER\J6l[]UVsV'$dDI)iuUAi HN[ Y./[mćVSv\[)mlV>/ZEZ!@txMZEIZIZe*HSZE[ZŖU*vU*J*'΢ íqU,W7WW\%e/m$gisB Vѩ(Ze;U6@jiUJ4m(*UqhZEi_F=UV̥U֍[E[يJK:**ĩ2\5UNjWDi[eAFVm[QwʹdP"RB[bNi߶q4mcV!׾mvh[EK[e5杻r\q=k *f*#PGp*FU5U , tBWyMY-doU\oBX rtۮNGmUV0Fm* mV12 y*UUԵUH'n[M[ES[O[UmUVkj[*G_? b%bpRZ(}'H`RIJ*bTZiL*UQV!UXvUVq۴J6soZtU"Uؿq1US8= <{N FkB1h*זHJOU(WŔWq6%+&O}*=gX!Wl`\`VVI+z eTp3 XaB7%:}*_ۆ;{̖7Ĭ7>+ #rɃXaN`j`4!zV;Xb6BE*H=C$Hse'V``oVI`e?Vx~r N +)ū4;b%*~6i_唫WuQU܌}W\7*V⫄k^tg;0PWb0uwSW1N]KWA]u*᪫Wv+6%JBlr^!BJ"C 6߰ *ڶAсN:KVq?mmK탶*;e۶}b ;6J*}bDr)`T0% J.?) *NJ*ӣ@D\Q|UtT6 UHפS4UXjS4M4ULTqOWMf*T+IiCTq;4Eq,bEWEw[WTT)lQNQUJ̔TTaI{ 2W2t? UTMAlSqk0UdA U,T Jђ*VQhŻAhe< rZY/]hZd+O^gCZidis)-D*,;iYȌVHjhhE@hE_he|ZY ZV|2 l'F -B+( V o#kM$2@I+EAZyQuX / 0*VsJ Ҋ*"䨴Б*eWZPJie B* 2#uhZQlhQʨh共NhEXhmZ! 
blGh%Z>p(@+lULtVꬰYfe_h2+,ʬAZoK+dJ+n !rᬜ0 8+>B+ hewșP+ojP+΀^ ⦳X+ZYlUB獭䂷hX+Z9Zq#VR?UVMjV-FZjiPpK+#o~!J V&ʟ8*V35%Z+DZ+7bԊR+R+c:R{YEgE CgܸvVZ?):+,`tXΊ8+Ya_vVjUV^JmIkC+OgegM!YGkgs/gr+-0kgIfnYCgz I0y9+ @+m (H)H++*yvlifFZZ9USVnH+DVišoK+q@h VFcȔVܙ+ZT\p@+,_ 8.b1Њ)B+  VZ1ZhV\h峤lh,&%-DāYYSY Y1felΐffV bA ͬtCYyNQ0+ì(JE8+ΊY\fͻY"9~3+9fLב8+mH,'Sn#LVXmi%2 \V65&Mr@P BZGK+RgH+eJ+ I XB9!S0%P+JP%E$$2O5P+7+(VZ!ˣ* , oK+-8dhiEiłWZZa稖VvU>9H+b MBJR,6$>)Z!֊cZ+ OVNVEkJXҘΊ-@J9+>tVB1+*FF̊F-Ί3\{ tV0Z"jEsBj>Hks0jlsc+LqVѺu垎2ZE> melI% )iBAc+2~`+V(hlŢf"[!ٿxa++wʅBr [aVȭi{+@p+x {4lxRhSoeTUox+c/3Z୴Poj^ފfފVX8no@^ୌ& ɩ 4<bV^K)򰇤 ʨS9WmZ+޾P [Y~؊I%P6`+D^JƭZ+VfaSIX+im-.ZnZZjԊR+Lxj\qTROkh}ky(VZ}@[YMp+\wyi+O3Ԋ0`+js ʨJӚV';bOH+*"'޾B+*>B+ȁV_^h>9B+P5Jh|wSgJfvpVXamg@W;+΁Ң pyC+@ 8+RVɏ124Jb謨Vf"7^B+2Њ;# Њ!V&r)$P_plYH%lk\M[Cl>`Vn*#V3FPlp kř֊V֊OhlET׉\VB: rV[Fc+l}r 0[Zaa' 8 29P+ y*M/}Q+ld V0VS~rV_VX[iBlbVN- *eE85VV_lMb+SV$VX7[RR֍V\=ileV|[qumb==vkmŮp?FzVB7"=%BNLS+K /VAZpUZRZu ;5\Kh JV[Z> 2AaPVlʊMDeʮB0^PY1>2H,QV(SYFY!Pd>IdeO@VdEdELTd%K+-Vj i{m(j炱&Vbey72XUceCXY(XXa_6V(&Xa- aCHa _ tV5CHt+J + Lu+ `eMG_elWiX_-'W9rV5X9I+l vb=,}+ Bb+l*(U\+Bl*g<[W@\^dMYUNegec**C*.(PU,Ul}*kUf]qbUpQ*r(Z5UfU "tU\.mW%WܩvUtUޮ J\,r&IYGY%:aUT`UȪnV߈Bz*O?*?U@SbXZEh NZEIZDQUU'O|m7`W^\-ttUb\,;FvX<&*zr@mU"5ªv*Jɫ}&*JU5*ȫXѼʮ^m=W9|^u~WxOUN^B *䎴B)J/*׍j"B=M*7*nrx"TDG`eL,c\y `J+B嫐b ;bWWqx|u}}|}V2oaOBXv6 "`{n &J_MV(Xq\ 8_jt}rSr\9|wWk_fWۂ8WY |BǾp΋Ff*?qg**n,b7+bٗuո * R`<7}>J+*,*bJ4*m`Ѹ m6f**ZC*آJ*Yo]NVx*ϕY]łluUjUXn\e=S\+r(a(.g\[ŧ*Vk -[9Vabmd:hzU4dUiYݍU6v>oYe}UِU@dq]g<ઌ:K2᪰7K*VV袬Ⱥ󂪢5%$-J{*l|ҬVeF]П˪Hl* UUԩժJ3*mU]A[U9֔U7TUE%TUpv*ǁ?Ue 8+QU,VUaWVU.Ԫ /Ueʢג0AN ( U!QvkTIS6U44Un0UT5UDaTYuWR9e4V 6J7° VMdUayZVŵfUdLV.ŪX*07BU"sUeUvAoUecfYJO*EUU2_ʊbXHUhg\4#TƣAU60**NU䏕U؈eQ]R9w"#׮_XŲ~eQejURheXB0DV9 _UU`([jY ʒ쭖Uʶey0eRsZV9MUiYFQVQjSVI$J+*d6",N ߲  -ܟTUJy*-V ܈*-DVpd̅@VZYZV!e5U|(*h8cRY=ZVy.@d75@Vh%r&+KV!*^ndϒ ޶]XEVq@}w(V骰H*nlb W|vU\U1߰  e] \kha Ǿa|Wa5ac*栲/Y%J*gVٶh*qUi-M]lUN**U ]ŝ+U>;XIxVGU}C*#~͑*NUv4BN*#׾J,*NU J W9;+z +!XU>wzy6{*H*ۚ^e,9«Q_k[S~|e<[:JHk_~۫h16X/eKC 2-* eHfYL޴YF,Q,cӠYO E3&2* e2֗B0f18YF(t R@"4˺zeXDe,cu&tB`G岌 岌 RqMeaZe` RzGXr !.X{Ɋ˲TjO,cIk f_g2~BbYƺUՏJe?!F,|~,G&,8{a'"XYbhNo@W?eId=enYYȵ2V72G}R@ع~,2w$,/Y2#dֱVN[tx,{ UeXMXF/},㣢LeJ!dԥDq9u׈"Y,r@e-2.Dz\@ˑ'c=T1u +]'r/p,7ͱX"XFt"K8qcX8q$'3h$Xv±~&(XƮ 8(~cX|@]zD(yP,Tih/YJb=t`H, Q X_e9<+`Xƨ$>N$]hHٍ2#XFKTH,B!v$ %jXr$\X'*0Lw29Y/o22\Jfs(Yf,YFR:HY}':)MDgKgM. :\^_:xtCGtq2Y 2`'Yog+)eL>!\f³^ >ˈ[og|Rxd\Jg!Gt/2'J,{x1X<8qj³X&xj<ZFO,7,=gegg0?|u0=ⳌmN`1fS@ˬo24h#.iZIs"̱X>1?ܿ -~ZFz~fh"Z_h=C̦"Z:2n-d_DQi!Z:SeN-#eCEв'M -)=Wqy"C- W!ZFgr~)-|BҲ~p@aZP*`ZFqVRZ:2|T)-#\[)eh AZi̼M!- >H˺Bv{*fL]!Y iwY=d@ZF7#Hh'2Z2 Wq$"HhH 2zX)QZՌe-Oao쥔YToe<@XJiO@-_[FPKGZF?UⴌB8N˜ׁ-NG1+e>ybZF7e4iW@-nbozilEj6RhQZzbe @-K2>le9Zor%Qe֤EeiF9{Z: ԲWPˈaY j~e|#JI-FZF[P˸5j PKyZfm`}@- j}` ˲fPxԜeRy6StE qZzTeP,+CHYV"-cfKe9mq2"h2Fϗ21X2zE^RJ2-d면v299QZƴsRZƢ_.\qJsiU Ӳ 5E=K-+;/a?Mh@ WȖE ZF:3B-#:)D-ȵ b\SŅsV*-svVUJoeo68WiY# 2~h -@Z8%Hk"- iY)LY2-=i1 EetDWxbZ\1ibZ* "nL0-ˑHLXIgg_´ɥeiil0- 2z/(-EbRe\(-dZL}iYAYMc2-|5w N(*N˜i1!2i7i+LdZi%N2d?ʹB2EuZƕYIe.*uiZf*sŕLZ\jܴP-=jT*2~y}TiZ^-0Vղ<6X-VhBV6k3Ji-[Cb9bNZc-Dk1ZeX ZQkәLe·W ZXk1(.2n Z_"XX@˯ 2:ֲ.Wr` RXX-[h~%TwVCZ#[&V`-YI-ժ`-#Le2C2O*M'ZxֆbDew=.Zg\=%!+kj*kvZk9ҶAZ#,NWZ µv)ڡ^<u`OA{ZioZFƁI@ eϓ#ŵp-#[-SՁ#Medqy㪩ZᵌyC%EU{PpᵌPãepd[ާ-Kztѿ2*bGeY lqЉb^e 4lx-} k[; 90̑UnaZfjؗYR#ZdnQU*,;EKZfYX21^kC-8Kq-cLR\EԄe -ⵌ1嵘b61^ Yjr-˵hdp-õpd5Qn$\KGZ5pr-3 1GRWk1 ҃M_'^˜aHbZt eL{s'TIeIbõΦ)C!2ZZ5UY5h-6Z EcY 5G%~(7Z蟂Dk7j>μki-=CkYe:"b\xlE2,Bq%^z-c{UIu+=Z3D+O=WkY ^K!p-F2/KH}Rh-t\#5RGkq8ZkCe\k*H*FZC>Dh2W@UW>4?*Xˈ2>ZzWSZLk8LGbZP9?k4BzZ)kpW^:p˿Lkv X졺b- + X4"BK+Z^9@^釔 9+ySu% y+<_! 
bILѬ銼U+/z +sM\Z@+/(Cwn+F^IWxemìTT`v7u A% ܕpW ~p㮌,5< 2N4"ϭ+{v승ͮ(T;ٕK{'J?eWOn+3}5J+sZ ɔ+:+ +זˀri`D]YݳXv,ٕ7&^܋]m`,uuPW,G]*QWn+-"5|Fu~A]Vʑ x++'[ +%1W=+뇀 tɕ]B$z+v+c:}Ä́\b]ʶ @H\12é|rŕ\10rW,;\}{+V[qVEX4+P. 2V-[Ѽ 2h+KS[Y;1"Q'H+̷ 2FzAVVLnnA;J蟗砶rDʽLmYV}Z[!2u Xw "Z+RZ+z#x]Q\bsb`tI,ՁP+P+?{VZTqV[i[lι/7b+Ǔu[!ojtOk^k#$V*yY+ʹFAZZr8+IYTPYqYga2G5,BQ~O@gyS⬌1Xvq&&ʽA@0 ( S52+aVF#iD2+ɃYw'ʒ(YydpVaV}GfIY13{f`Ffb{3+!@`VnkfEg/Ί+1C?_lC+V%VZ "VV9o [hEVhEGiEVZZy V(WjiG+4J+N[ZCZ+@Z(4H+H+*Ԉ#[ZYЀV++ie=xZIԊPcLa$N J%ǢB@k j&x Q<^V,}Z?l êԊaAHjg MojETjEUjx5,jE?jL?2zsVAhkpLkŰIFk`ke>Z19E`l.N7Z+Z[bQlpVZ[Z[Y6ine[ (܊zV#?op+ :rMVfq@[1­Ҙފ'x+z+B8Yq2YK*[!YoF\oŢ_.Qmo%V ineT5Ij+ȭh"ȭ̱Ok+]D[kѕ#@[f*VEm;懣< 5rP<)bֱ YWK 1,+V`D\y + +c9Wq \y@BtJ +n QZ\yH}D\WHE\QWv ⊉ˈ+Z-P%b\W:TpA\p張SX 2:W^ * [vĕ)JW*4[ nEZnebu"Vvp+k-42Y ( 4܊ep+V<;GGڠ˃2Vxooev2߃bF[+*\kpʺ!X)+.6J:]1W\z[̕Yc]gse\!.ɚ+At\i8As6t ҤDWryt+ǚlؕ]}Έ0j+cB7Hn*[;l+-T7bO%BxCze3dJ^^y )+ W^P^ 蕙2R?v/9ȵgg~ B :B@olk1 FUi""#thz%z+pW^a#+)Lٵb,򊫑WsS ⁆#\FT$:MLBPۉ 3+F^7]+/CNf,W. vEOWGKX~ғIP j%=1*ۅND􏲴hi% JI+cۣ}`VhK+XZP+\RTe$3$gVl1Zqc,jőP+L`B0O4F;V4 Vj%sS+w/X+4 x[[C+^:@+V<Z5BZuggeul?geiT8+xvVxYrOcY^60+aޔ̊jX9u?X9X!W a1VpT+ŗƊ6| B("V\b:b1M%+L,P+&xMx%+6r>"V(b.X@Ċ/5Ozº|v2.8Uddad4:AV(s `Y{!d xFV4BXq"oj #HXI ea1 +wBSXX7jkd;gTJEKĥlފcՑ6V뷱bc+6VUrmT"A+ `/YqPdYȊ0+aVؐJD(6t?¬0P5b f%d3+^YqƟXi0+@Yz*ͬd fF&aVQAkiѭ%f%B xMN)+XYeEO+Fb;.@ʈ!2zW| Ċ +[V|&b=C8+3!g%;-HkC+̗-+EҊ…VniVRchiJ6ւZIzױ@y b_GJ2V&.FJ }`@9@c/M1 2֕ Z 8 X䶴21VYʾ@Z  22tVHP+rU3H1`rVjx( 9DvV,gg%g&]ge!}ghBT7wvVR`g䬐M dPC+ he=ZB,V\jh: t?`4 pKVAQ,VF8m 4e28ЊH+MI+V֝V*ABKZ)Ur3(JVjH`+.|a+^*3bV\[el_3؊w^bcDJ6 B8؊3j𰵶nVl$ T/[q:JWQ$tCୄio'S6ئHWx \Y..gkr+x V&J;VRdnCp+TG Nٜ[\JV*A"V\ZRI"-[@[q፵Dxck+,E[1S(nkV\ n%J7lne;(m?V|nEVHȠ86gm; Lone<Y㷷V[+X+flBaA{Z["k‘}@ ;V.ݠVLZy7LoLj~Ԋ+aV8jeYBLxSOS+VEZqVFT/6J?p?VƃV le[i`+;cUc+8H`+ 2-}@rJHVز3~p V"@}YZK=5J V GD3lVI=7Md i%K+Ҋ"$0VZ0i,VLZ99x`i>K+/!iAH+"hԵLJq+V0ʋ'Rյ}$Tb`r ~ÛZIᘩoڌ`FJP+IZC܇Q+7 嗋Z:cj%cimDoC[I`#܊Mq+㶗r>oE od[1X$oś[vdX''ioeY*zGVRjoťx+5J\; A+\r+KnH Xm"o5s+^V\ onŕuV&sp+!/܊i+m%5k+462*|#n V对,VN5V#Vu[)TG?[aSx+Zr+fV'a x+k䭌ܖW|pJ{:"pE{N[Idoq"qϐ+)r+91W + t%| ]U8]1^`teÜ]E+0(4Bѕ1FW tl̕Tb. 
;cT +;C/rŬoȕ& +@XR; =ɕ+M2.4+!# ŽW(_5B'+k L4ĕ*\O&WmM@\H%r+ꃯH^q W<28J*捯^1j{ qi!\TcuA?WbU۠2GJS⨌綵y E3AW2oiCw4։@`"X62>rE`qejOD*s|]~u"(,`^W~EiWF'ʪ_}!|eɀn7 W,$Zi|e^WV~%s|e[uW D^yVpC06x)zpW.++Bnq r쵾@_a 5 ;Z_L@KW#=X" 4Wµ^[|n+ŮGvEQWԕJTmmeVk#AWJ1'BW*Q0W.Е *bO AWl]]yyd]9@jx)9ʋ $+1Pht%\beѕ`F+6W՞J66~$詘+kW~ftѝѕk2+ w;ܕtvWL]fvW]*AEU]!NvPW]TavŹɓB_byŁ+O$RC7v%+l46+aW*Z)dtJee+ʶH]3aFIG]"Օ[JQW_IA]C ue}N+x]kt.{;o2iseI5ϯOa?]Hn+g]5]q`tpt%; ]'hNG̕1Δ2, 46WE1W [\!wJmu+1J*x+VW\Ez+6RW<{ʸ5PWMI+,[]t +{Y]t] !tŻ\jftZ +/++I\q^Pt\[M1W\I=J!\5⍆&r&ȕqtV~dRx7 +5&W@HM\IR'J"s.oM\ +k'®xXv%ؕ]YpnĮd{s+ގ 1ؕb ߊ8i@SaiteĢE42š>tC+Ά]!s-!bxK5̕ \v1WCqe\: JF <W%J)s%O Z!W"@ß+!We\(reOȕR CZ'"W\q;J6N7]4"hpE_+cV(3v[z[ xps+%sn9'`[Aw7ͭxVs+uV۟]'oKeSbKeKVW${+~V|LCCPkmr!j(ir% Wr% ȕBkqЇŕJ$W\/ u,W6X2ĕ_<8+z qZ\q.WB"xJq=-̺u SWmr6Fڥ<+%B-ĕW|!W 2bBM W+Khî4uue[4S@WI]^VWtD+f!WrDwɕwvfY W ؘ\1BD\01"WHL1M&rj+*!WRb{2W\Ec5 $_ʮnrx&W\&s%+++1BR芷!2+1N]fW [vJ]a3+xb8I"ؕLŮb)wJvW\jw+D]q?fw%|e_`.ݕ*$e3'OI+vWR+,ye<4ʾsWƠஜYThoL+%IbmW ]weͷ11b)x0bx|H+プ/wۛ]a#+]a}/<+_P˶®8,'fWO6̮ٕBg} ,+c:`t]ATrl4}Wє>v*te6 t]&t%xЕ;f’HЕq<0Ҡ+,\qk̕BJn3+O\+W($H>]q\aWٕH@fW c]]U=]!S5ʍhwFJaW,\~W\tJVl⭸ +VBNr]V[o%V<Xo%qy+ާ8YW+7##V%[Y5{fV"Y[=r3\bF ;|q+QVp[kjp+ė8&2r6TonEVi?Q+YVFR~~V^@ ԊCDMxLbj 9V|BmLjŅV Z[N8+eg1qV'GX5@+Tv7i}#rVYaNC+cb}he uVX6%C+!Z7šZ9/.1WZVXq[qSSB2Mli+û8n۠D̓V&mŃk+렭8) VڵDVbMY[im`{ h+*Njm`+Vj7F؊ȍNck#zRZvķgVEG/V?Z֊#VnV>mGuO>MmlBSc+#E ;[[Ih fc+BCl[J}V K[9[!C؊ˆ<(&[>^ :V"SIc+^ ble}-hO:VFCo#kevDV¦O¸#֊3r|S+&jV*sUV1P+&[Jt v22Edjez;R Je0눜egB;+UwYq ;ZˁVL YنV^ZY_V۟:ǂ -Zq:tpV9Z KaX+Za1JzX+NB≭_m`nkwVFa#/AdK7k+;8h+=Ǎxlc+ZMmkœX+##^m1CJf2ZY+a‰Y+[?l?V}&k墘֊mx#l[+ZJRJ.֊Bm+y,e jke `Y+(Z_Pk\mVŶDX+.S?JnhY+Z(겵fkE_T[ V?XZqM[K+£%Ҋ+-lyV~"WEBZS H+l>R_ ̖V̊Z!/ZhЊiTZy^Њ@+/VZ)-QYyؘge\:=A+thV%HZȌ.ie_FҊ#P%ie+V\202V&RCN--Aڠ/$5 䒥[Z232IVGZ:J'H+{ C8ljVF!&eUCV-Ԋ5JI+}=9VmjiP+^q r?-vFZڣ$V*ZA7^r'hJЊ㝆V"(Ί=܇U9+Ll)rVƻaY䬸X roolheciQA $p#fT?VhF8+!g qVY1+ۥfe0e5ͬU'LY YA0Sh@+VF^9GZ b6C+qZ7Z1R{g83 k#V-S*-88XZBZ퀴U+V촴1lZ؁V6A+#h%=VxZ@+m ZIBCA+M qC+œR) <ҊOF,x2fi0!"BSHZ H+H+liP BZq昡TZ1bЊg_@+r"mrb!xc#K+$DZ91V[?SJPFT㬤Ί+Rg%Ԅ[hg8+ 8E^NfVr֛Y  BL:̬x 2"bV11+cpV<2Bl}rq]U-8Y u ATYY 0+apVlYዳ.9+ Qͬy̙Ya0+?1+ `VV\4 8bfVWzbYa)fbVvfVnaV$s3+#fe 0IXeIo@Yq# e%* ʊcofe%h.ʊQV([@Y1vqv#O<% wh~;+Yq!h%ǙxZid<ʊVV7"FRVV✠̳1 "e 6(+iYYǯܨEYjH3+DY!YX1+ޫ(ʥ-'g HY qV0K[ hıiiewVj- &^)EZqnR6n bj%2Wa1 @+7mlgI;+ YdgňFK+hH@+. bjRs-lLodh.8+~Dxfd5̊Ⱅ(0+Ӊ#ځZN~G*h Y V[+nV5m%#(qe6FUr^VQ n%1Xo5q+Zm2Fmi>8V[oԐNob k p+Ž9)[WV3!ջZ:u[+vmxk|E֊0} ZagBbje @ xg'g"3+l`V(< b+̊۴Gtͬ<7엮AKge>58+>Ί qV؆ʳZDʰJ*P+~ԊvBLb϶V<)ҦVBԊ)P+yZ9YD nZ;TMeNFJQ0+.ňb% goh)ʶЊ׿_[Փ煮^~/?~Wylk'娪{RZ f)-()-u'vjva*NdӑRZfv)lRZaHKAkrحmc? 
BKu̖$U*Pc_1H8m}hΝZ\gn:Zqmz2Tol:CdK-;ŵc h,V*S7U(- T,c=ZJc)RSgGN,u.-tW}HZ+öyHB}?xAQXy3Zg9F Yj>Y>W7 @81*v)$>F>,ǨL,՝Ct^Zg|JR:K][5YLMLuz"k6όE}~MY:K}a8msTy`N,<?Yꡠ{v7B,§qjRgG Y*:8K=.u4RoY^)5TzI?qUn#6־+7zYw=Z]:K[nimR&}JQMR8KbRi j++ב:K]kYu:7\gzMUK=ni,lgggͪwUXgY'}0gIbSgVYjKϮ:Qz76gAlY3>14sYs>K}0?L8L,tjܞg,ϒ@[KK/W_z={~,[h_HVYY&eW% Q< -̯?Z0vEód%e&TKZg_ Y|TG"tl81faRCkPq7NP,X nfؔk,>Yk9l h7RKMT QQ,խ=RL,]YɁh:4K C)泺1fMY9ķR"q?'Y4 iz(ΟȍYaq"_nԢY9 Nh⽷^u_}hjI}vDTc9jd",5|}7E,ne$Kf>z(rUb9!Nת#o큿iROXDT'&iVLa2mԃw#6jdLY) kYYx0-H&elb냟X,'mi,:t}YzjsbV gϩԩ g9} qĈU)ft38KW:puX61~ʥ}>2ŒdLl Rv,nu5R?YpۑXy-Cg0t݋H,x4rK:K}?5(1Riq 8ie>]Lx,$dkJf%we 4K췙fǢY",L]d4^XiiqK4CfQRǢY؆hw6͒I,-RJE[QY*sKY4ui,F}[f'"X;;,t fn,uoHhڴvYj&ԣ]):69MLs;,,H4K=%fa1-UX_+5T8Kuc"]gX]f7YtIpWN;o%,u-;*:ER}1!ͳLD,ճ4RQ'yiz*V/RFK,uFROkYQR:KI@ʭH~q]JgY tc jևK!Kc,5oߏu^fi :rTþvZgBYKie C2Yj ՠuK+'ӻΎ2Bt Ih]vD-CUnכa&ZjmRD ˞-Te,w7vӾ hg!~eF[v-DP4R9 4of\nsELSV)-5:Et0+A+-RSHJK䢔2uRZ,PJKE?]RmՑmw>6Nr3-ǘ^3ӒКddRo8MJK} J)4 "+3i9~ѧR=9#-#T\a;$e ?HhNR-}]*7ZE -X,O-[w B6jUOe"QJ_鋝|Yha6 -fjď"Yp-uCfj:isIM-c8Qi43nԖ6:}-~˞ՍBK=e]4R7t?/DL}-sIc^WGZ5/ -GZhL-j^ޫHl9Qn(,u"z!eL YG|hYj)%}Cs;~D#|i!Dj%h3-5f`'OY|^5I,e ggY*Kl6iutc[g3!Xu_2Ig%y݈g/,by~ׁY:kez Jg 9U/>#;,{(e 鼥:KOĴҷFY2oRq8gj$G62RnYzO癧|)+j]YztY=fZի#kYw7iGų$Ҁm5YjnwyMgN>K54űX3g;Y+KhN@K%lf-3|g[Wgq5,u1u}LY:v(ޗ^ruR&t8>p<8gKqlrxդ3楳"u4ϒ2wf)U2K(Y^t>m5Β98g,I&f6K}6,uϭR{mpW:5Y;^1l4hVnG^rwv RxͳTVeUxyhWZheBų}ի;DgtYͳwi~8Y2R9sʉxNUknEYg饻>ͳTVe}*ϒ .W-rRLj\m wef$RhL3YI[i 4͒Y/6K̅T#eN-mfjݜmP1RY3Β|It[:jEpi.qΒp)<jM:K=W6o%tz ˥HR5Yǽt::K&, TT۷t釶3VMt<ghFi0gƬLWA:KZgQ0L,Տ7Rm_&N,ųTBh,Y" <dQijTLMLH,%$( R_XG,YWgW?y[_K% HYi_ߦy[IUԲtYEy9i MgY@k Hi:kYb/nY:'rYx{ Tw^,YJgɢ>tU˥N֍rei+e$ed~A,zj,Yfɢ6Kזxq)ed$BM8XntB92K=QtaZfo, G2K}蝖YIdwYR6 $ΰ YIh$fI"&4KE/U}h,B:9,]X꩎zfyM,44\R>-mezHfrE v ̦YjXY(`:KfI2KΑY70K$,2KMD7̒`*?Y*ػ0KBI`DKfߓBOeF$Y$RSŲ&K-1Yɒ(:&K}&m&KM.r&K}ie 0d Ptr> K:pPGţ]KX@kv}NQk B}|U K2^̖ 5^FYDBL*,SD&Kjȭu@\2YR(RžBY+8ZefYiXò#CUVE,}4˒d"\~QbR,ʨnhrzfQe(w]:99cNY`}𫅓խL>,N*K .ЪɁ<eI (KPTU3z(Ky:NQ: ,]#J Ie=RYL,5,Չ2 Z!%%. eeۭ*wbX;mY(pT"LX2=IJLAK]oEeT$s`{yUEt\ݭ{:8-g>\{EA= /,:sS;6Ŵ$Lh,+cQ},S-NcKR'Kf$t#9V$LqN"pH:4KꂡYBTctDT<5I I"2Kr(C,SNr "h:,9hCtN^U/yU_<^ c%U1V^*2V d!&@l$AcŁ+wXI Vʋ2Ӊ++ζXX!a1+(oh}gU432ʃ4yQ*)2RX +jaeQg"8$J.All2@b/ {Xcb%"+7'c%9Y +ɽYq'J mc3+YdTa4Xq1x^+zx6RrSȊΔ1_07YXq-[XaaE+9wVX =^E+⫘mi^euU6n |qE$UW7Wq)̫bZīti'ʨ üJfͫ,&«U4 ^%*/ ʍݺ!!*.BWI9JHWqy8*-y! 
Wo^f^|<*#^4+* ^\(, *$"W^%*!0"UXE32.׈Wqy$*WI=9?WyU W)[P\`s#^%*cRC-ɾ *;U* _*㫐l_yB*XJͫ3*U<܈BfLW35r<>*.Bx)2M*.rIW983*?yksxgͫ/ yUjZWYcM``yX:udu{ *Db]/xkr)fe^ЌxĊ^e1{!^e;vWqBe.8^ߘXX7MZXL1Vu6V&< @VCBV0Ycde&:BVUr˄YaRlf;3+Y̊fV̅YcYqIo&2RVdgeEC+cl\6V\ ndt++ EYY&$`eS)+aDY`eQ=++eGY 3+d&febV2vY(lFO)bVEYj{3+OAS8+-EEJǯVm8b`&hE-h%ʫgC+VZ9NnN2<&i~@Ҋ+ЍZSJLihe2VпqV2|bV!g%;+DdΊ@+ŪZ']%b@ʸPL#@V&d_ J#+ ,`BV@AV4Y!+^42YaQ*+alL6V =a\DT`)6V2XI_&+eXzJVXW m@*Wv r܈WI7k^4*WYm,^kUͫWNz*v'̫2>Ex,FXY"l`$+Q X- DXq`e zHi`^*#m[H |̫ū5\xϦ̫lcU̦YW^RG UNWT W9_zApU2tWq&ip!2Jp{DW*cFoⵛ*LU\o^e'(e^ŋU6V|(;Jd|D[UҸ8g `㫌W_5WY 'JZx=İU2OW!0 YUm[/[ElֱUc8s4ʊk]%UN(ݘUDW8ں-tYryMQWj^JWae^pU)*޾UW$]e̬9"]śvW@ ]%*.qt7rXm\C#pTg**AeD3rn@.ҏ0*/2Oi]eߑ $ ]śXWIt*O{*.EWf^V>%Nx.x_ *UR]R:x*ͫX華T{| ]VRn`=  +&`E+t89hq/X٩X5zja%+IXIr6Vb Yq^FVLxY|#d $a%+DXYo^'J4kUCxT2n^%9*㾔^ͫ\J-쫬6*~UH1\*vU^ڌx%WLd^v~xZ<-FWBWquc*t ]D[@/r4VtҬJͪxhVE3*JxTq&(#RUpD6CUT>.ݨo)Pу **R8с*,Tq@R*PeF$x8*ϑMKT`*)*SUF"PeoPR@ >U5&"OJ=&,ߖ=va gbU[1Ċ6ܿAŠ"haeR1VVX +lVbŘtMH "TXXAUDXIWmašiWcaej+TXXфNV\Vaہ;Š,xFX^< LpUtdc%6+++i_+炇ccvŚ7N YaWW#+6XYdŻvYL3(+x iAeTݐ)+@YĬ{7rzW `pVr>¬<`-c2(+\CЕBսJC*B‡җ3 +M"x#0-3;_BXqCi`%+W!l+J"V[X(rbHXg-KXOVƋ2s=VaV~fT?Xѳ{Vbw>muSXa_EEYU*_x|WTjdV&A]+r2 IVa"zi^ūXȆW1Sd^ĶW&WWq*Ux+WŜ42-eHXq,Dzӭ{Xq!Ċs!VI䎈Z]â+1V&iDG!Xqj2V0VIaLWXƊl8cV鸀 lXXg:+"?DQ2BocOv2V:Ɗ -0V)Jjo1VJDzQVJ#eŽq(+GYnI郬x/j++$ԪO xbe%'YKzQV#!bbV:wf% TĬ-L3+s) HfVJ VdeԜ1+Y2ĬPyhfe]9_bVc0J++AYqMT3+"f~|+@fVBYA3ꛕodef}ІuH@VWڃd3m1V,u#+αY!de2t@VXC$`FXXX$ma%Š3M;MĊ/ Ċ6DXϟ~ZXّVRۀ(V\#a%VX4&Vkb%3+-ebFM+Xqh- &VRebIDbk;+ĊL+LB;F1"beUX? ;M %6VU`x`d| $/FV xJv1&4VVFcFa/߰I@Yy-wHY nL^~@8raeэ(+7ʊ„QVTPVY u<6r?~dYA2|mdEe[Xry *^5=UIAdPWAWݼ sU(WI q*:*2*??EXt;UU*%VaWU*b*"|&[*}\ڶJc*[eWN Wqpt,*v2~)eu('*@x"-ZePblcMUP:*L\'p\zp/UlO ]\=YL&lԎTVɄYVɯVl[IV[[ʢ 'YŒUYDJ*񎬲 "ϲUFHV ą2Nh 2΅TiVZiV&ޢU5xHAVGI#*1 \,X[>DV9EV!^YY+UjEXEaAZVO!*I/Jq}@ nUxVUVqYtYزXn"Yet" 2 U;eσUM؊h6 I @YZ}LT2*r eSK*Z436r_HUO'P ilUWxۘWW^mW&l^1Jūh3*3]%u?x;*bj(ʊN*NU ԰EӸ YvUF߸~*\e W%l*. GJz3*iLU _%\%q?*ߍPEc\Ł@*JuA/>rW,^2Yī7Tx Oj*.^52YU*ATyM3 Vآ0ĊĊ7 uX/Xς51X[TG7&+.GraKЉmXYfVyìDY->3+NYq֗fVnFbVHFNqVH.Eh*Ut$-vBy?XV?V&$`ie| I+#0ZI j%iS+!|V^329V Z )$k%kvVl)֊C`+!5V=P;0VƟT$m}KJ_LaBsVf}V!ZYdd Vldnell`+s\ ?4B: k2)ʘtal4`+]Ha+_A[9(<{+&Vj )-WЩ\3ko%b|x+8V<=JNJ{+ߤz<Wފ7r]AVH_3@RÄBh&o \qx.\J Qʄ eqefZ7k0"W6r0BrŹ!W@WE\yM+ƹ W&CJ-\'| Ã+R@\v%#^-% D#)y"<췆,!p+d[!S'ފw!Rjo ho5x+D2 &oV[7y+xd0{+N'[01)V |VLVRcnalfnX`;ĭ$nb̭n%$Wښ[q&e3|%₱+ 8+6jEJ'dGsxFW1yѕGb0BW]!ltņЕe: QFWmuŻG]9|DQWjG *S9N QW ]qk+E]_*P tEK?AW]PJ-AWWhx;^#^͉2"+ϒ2GTW&ye2n ^JΠ8ojxE+xšY+BW/ye$y%#dz +Y^*bzРLG`"HJ-+ +{e+aax & .++Wȴ9udW=b{ PVLxg{5tW.J+Wңx++,W+R$ kJ+ˌF}OR BQ+ci}yJWI9+rUW(;b{%¯2^wL0a|e\G:+[>}WdQ }eWE_l~Zʸ+O52H+WL_}|9WF&GEJLgKʱw60n~eDZ ӊ6k3WLF_JP +eUZ]+~Rʤ& Whʢ]{GR JzCN^r+!W-dyc#x;+l-xW+}o!x҇W&D򊋢,b++!2W<@^< yenSVW^6Wb x%wɛT^!w`M䮜˝JqW̴y]8aW,A®8Db4ICF]I}T^E v%.JJ(aW&= v|'vMv ?j+6 Į$vedHH]!"1`(xϸ+UTowUvWw䮸*Jzͫ:ܕ|w'J8]g]Cwl]1iw++⮗4r3+; bx슳P&v%4 n3-5D]xfJ \$Wjs> w+K]yU}WY>Į$ q] gw% ܕ t]QGaWrj3B蒁WV*WNG+7DW&T x2HB}+ dBl?]&vWFJP^Nn/+th+xx+Ϩ+aQWrPluZ $n-rW*_/yftb3Jt+5WRxP72κ+W\1x%ʄ4+? 
x%1+ dlx^Jf%X] cᮜvW+MWHU+ܕQlrs_,*]A5ٕst]Yveamvefiv*y ®\,]~]1 Sve?i7+]9;lLu̗ؕaW!t?FaWߛ]R}ՕJןBauENBW*gZ|Еq} b,   W+> ]+[\aKqh2⊷r<W"tZ\j#xreE̕1kTaet ;rfW=̮bvWgI%OQo`+qWHC4:+.p&+o<+lVfteKa 2"E +,b9 IW6WQ=J(G+^\ag/r(̕FW6%]ϯa?g ]9 LЕT.7RYrHc+AWW >VW>X]L ]ٌaWؗSaW<.bW< @!W)6 0B?+U@:"-EJn>W<_6+; A+J}=!J5W.Wo R'QB_!JW\*}"T*`aXGxDW _aJmw_+ _T7W"H_ɵ6Wp)A`ʡ+W&H\9*]LmBDr̊WR/}ި"0xW\6+5WRg}eB' z+/ye JryeWk X'+L"xŕ+^jrQgw=+]]qU6Jzoՠ ^yC /gx\Jg^qlfWvNWxݕxrW]Y;rWNW^-+fMFin+^SWƻxM+~N^q,JL ^y܎$xţh+r >I\ fWFcs^a$+ޮ~B|Vya`W1;t{t>wE,we%]~vWsw?:@ؕ,Z?r"a%+6։+ʪl5 +)B/`[dKWB+}UX `4}}VBXh3XaM2v-| ._U UUh^%yMU\stLxpa\zlĸ rbpW9!UpkpuR*dXWU*.qEWU\@j]UHR-Zl@0b, *g\% sUاlڃaV[!Xt VK2b~ XeC*5Bh`gWq`\*5;tX쟬Z~*U`Y%%f*c5vA\U\ȑURR%xYв$h`BE)0*^*Wer"`J7L䗋VQgU1*6EUVeXW`]%cQ*A+xVx8=ѾK*^ouk_e[䫸p _{WG.*aZ䫌`r#rWل,֧_%"XOM ,#;2&V #`ed=_X}?V/ ]$`Xq' b8 DXXɨaez~h}&axHVbRF+,'Ċ VQU*WI#Jn*|baœt +~-ma%+g}&aE)[VH; +ԟ1i:+Vƣґ 'L ,(V>GhzsUjy:_%Oz*ۣ3Jb*1 xh`>+%԰Jx*] LoE,lqV !ZX(QŠ&a`3+_B90VmLKn!VbAecoMJ?@>Ì)]YaBR DlRcD %*F&aeki#!VlAxhebzk&VAʘCKX!ehPVOBrowhe%m VVx eEIYJBS(+1שּׁxBne$ RV\Te@++FBY3!de2.•;i!+#74o"8dƞAVl LH̊J p3+̬tfaV·OY٥HY>VVf]i(bV9ֵ:uvVSqYYqPfeMbY>0+ͬ3ͬ8jbV`ͬ 2EYA$YJ m$++TFYqt̊HfV72bgeY$Sf ͬ8L g%7$RJ!qYg%$Ί>pV$.BMiΊw> gggrVjf0 0+NO=̬xef2CuTo$a.>1SjfM̊9V(rqV>Ί[ 9qVldފ}y쬄5$ԻZB?׿}꣟Rg/_oto_#̴|k_~ȟ>fe#\Iik2Rn² 2b;e:Jȯ8tʍ5UU3; 2,(Ce\g2\:0V;Ee5`t|a]ҲRZ*JH5сFZh+2aqѲ NqB%hYPx-.h{2ZR-u}Lh)Iie$UhJH-#SD'GH8 BZHie_lT:0e|´,ˋ!eq?42? V):$Y%BZTHH'iWAZ 2]G i2gBZFFLSJ˲J2Rڈi.e$/rYꂟ4fZ*MZ('ehɴ2242R}LĴL_reٕ,\lj -REצ 1-ۡE ?Jl.Tg:γԣ iY뀐(Gl^Hvcieb$>;2o94ZEJB˼:p=ZY;!k P2o ^ΜgQ"/>˼y42]7,yD$xyųlj Y*_ e6[JGfo)D66N,?WY°s68.Ԟ_~Ys珄V@t##D8<-i-m'I\clA2o{Y7>,'8YHg5,z β\lɥR~[,3TWB82w28a_*:5IUB%ucT`LHgNOt{,'!e&~+23Zgɣe ~ϲ]~YbRct`a},T6Bʦf1ij̡gQ/ZS]W*>vQI2"Rg$%m-wZ$thɬe땖_ٍ hZ%"C_Gi.J,MTH2/HR[BZj@I#-$REj{3I2ZfH#-*i~i%SV9e=DYԿ(-b,^}J˶ y)-ǖ1$e#)-R2nz)-"}RRZf;Ң:8Hi KfQZ<iTjׇE;We-emz+%"fZ{ô̓ e{˕߿(-1~QZ_#IiQ=/Jx2E^Hix%1-K/2g"ey>-cս!)DDRK:Zs԰$tя98SԲ/PJ2;]҆ZBXj2%/PK4ljدbPZ?VlevjKZf9I-)mY jJ + )~`A-#jZfE[2[G^ IjaH-#We}R52'n>}iZN2o2pԒ*~ZM-GR(,9tz{Zke^-]Oi9wxeֲ֒0Z|֒)Zˬ\K]~ci-Ik񔾠 zmJkCCJk-Ak`I"̆ZKMh-U6״֒2>+fY(i- yNZKc-5O:nZӔ2)Zfoie^rqkkh cb4֒XK.a-/#% e5k^[Ga-հFf"e0bW:ߚb.JX-36+%V˶cwG72Q-M9iR_ ZT32'C/iEuP-5ReBWjU}Bd 2[$Q-Zy|d$Ւ.EUX-@Y-& eVp.aޥLelX-rbl7տX-s0ZjnE螡Z݇jؾP-D,ex/CQKc-QDc-63XKlk&ל 5kk]Ok.]/y-eY(R!W6Ka-ZHV,#E?[-5oVK ,o%2rYkcIs-sepõU5]aq-s%%5^l7=kn6ytAh- IkYZn\ˬ'k*u,͵d5 e>?\~2_ci-umZk%Aeg2< q- Z|j7$e~t;i-sPtZK %HkVZKeHkT"L?VRU^>8^D7֒U'[˂u乏2gk} *Gߙ^Rq;Zd[mRS~0HlgL[sJ܄Ė,!d!eHl#"S6C`t ԠC/ȝjx-ZR^sX-s[jDie.k]T ؒu` -q l/3[f"\l.̞V l1he(t1d R WvխZfגP<^hi enkIrZKZ9ֲu/ZGglfĆU!dlrD,w-@ #AT˼xʍ-j_IY-s$HY-s@xjqV P-Yk}U9vdHlriMD2-sXز|G,,lEbs,TځIkCcZqZf-Z TlƆN@`[GJ{-kk= 4[-jI?[-kZf;x oȡEv.ZF^CZfCpi7y{x `K%*W8k;SL4TOkRŖ-`my3n2߸+ũ;[܋l`l[e j[<$l 52xa,[<6ز܀[<6I*`K].lq"OQ F%eR_. 
bţw-=VK.( [a8-12FA-`  !r [E-Nْ -vdVlٺŹ6[HV9b{2[tbT- ud02ٲ x&[בnd#&[ sDdC&[jEGl_Obl:M-~^[lqHdK|uTA⩵͖v -/ EfO1[7ЖdP-5 ߌ^8a(oS`hKd4ڲ]j -sx.Fjl[mIՖ:sՖ2{̡wPԖn?-;t\ًۤՖAXmq~m!bODA[Ux!ebdDVGnɈDrK[pamsw[nwGn.bQ_-W%$rK}e8+¬y҉芖[CnY]c-M"rC[oO,&9tW-[n(gD´ܒ{L-^طq-Wcnp"M6R+RY6[i|[LI[y`L}[4En|AnY 5m%(2W58]p /|5OnF- 5R3R #[fCy-˥ܲ-,Q?r †[ p nNC [ܔp[rT[T}B"n˜t/Rˣ"b>oi%b["ݰ-ElKhgYnap|«l.Jn˜e< ܖkcK7ےylv[f|n˼|BFmv-mkG%|-T4J*J-p ŹV[ 2{> eΗ2{wU[ !ecZmIjK6R[΅~L-m|EAՖ=^mnC3ͶTO;*IbH#XTW9~rbmN"=-Ϊmjwϰ-5d#b[Hm>ؖQ>f[Ab[ܱԖ6"_j|ꬤ-=eT-xQhA[ Br'YmIDm-k^@^ V[j ՖԈh~SڊEт޳͖9 e6f"[f.Yew=Ȗ'u W=^.'zJN-lR[/h%m"Z(t"[\`%c$Ȗz-sBl'rlYJd L1QǍ [|D- A8&oe^ <kuOR>\K\p-s#e>Skr jr)Ls-2SV\KuakYs7uZkYns-!D\K}p-ۀ6kqMk#C*C_P[y)bL⺑[#`e^O5O-^DlOO[Eb9ߑز\--y#de_$ڭ#slI eΖ;l26^kWD'kgz y-zX2uZfJڈ1׼^=a!iZkaa)[zIx-ky-뷑Ev{-NR7tZ_O^٬x-."ג.^K ^K%gkll~kwyrAR:y-5NQ\KH*颪*G0ZfNwTUٺiڥ^K]@ .gwͭe{%Z\+aeJ65ҹ[[`RVH*s+zVݼN-ڊk+nme! tZo #Ӏ+4+s-Op%G+uE\ yuCR dc\9(A4qERpe96*4;+pd\ykp\1pPnpe 鯨NJ6Bj'2mVjl f)V Uoe$ne5Ch+ b(ڊVW[^SJ־trJ`mbOgk+;7WVF[9.`+$[ Y+q?V\kkEd*Z2,2ʡ^WϛZ BL, I P+T \~bW|$ hIJ@+,d$Њ֞g?;+Fogbb9+/gvVl{Y qV6vVXY9܊Yy \ eeדҩ+)+~[Yqݵ?Pfe%VV\مb}mĬ\-u8@ eVVBxgec++;1 *BYqYpduFVsKY䇲R-)O\jdY!fe̬ÖbVvAV0FVAY!iAV !+CcFV\gd";FVC4BVdS{#+_FVƃ18< +Y9 Q=QTYY&3FVG\ʊ7VV>vR +!!+KSYfd%& ȊGtBVȊP!+53Y\ZJ( VV=ri(+%B Q(+3_=u> dey P9Jv+KymOX+obEzwAX(aZZI +P +vT'>`@+GX)%MC +@ZXqOka%LG +$! lf`ŚG nFjV.*$*WQXxIU]s*s=W!^K*}쫄h_jP૜X죍f&`C=?RJUJdV}XYu5"a犰BgaLEŭWQJ3JrDëͫ4⥵Wrh^(*'-*Լʥ]UjJs^-YU.W9wA8*cb]łuG%, y_ͫ@(ZxarJ[_IXa`ūVr5p Pai`ŁM+3VՎUvͫxu$ =y̫a^3ϼ+8b_|U\TBa|F1U{wW9^V6!V^ma8*:7+H, nӓs a% ,TBiaeWxXX1aXXe,ʧ}BZX9UXAXajXX_+cS2,KXbbd$9l,\P^S" +Ɗs挬0VֵCFVT[G02^P?K+7o"Ei:VHZ~VBt!|6 [Gp{ Zxnԗ :(A1-h Pbh*JuV[a3 V TIV\Bp6ҊK-YZqi[<=%.P8 qW>=gKY)bʊEVV:c?eeEYnJ`/ʎnJ QLVV:.ʊRV荲 (+|FV+YʊRVGsA-eQe`VVs++Z V"VG@\2C?XI) e(b%B+ V=+= V.JX WXa+1jVBXX8W1a+>..+= Xڭ |?Xqj`emj+1:XYBVNXYl6+u%tmV\7tK+1 (K+ $Xp)+ۨXa$ VHXY* kW8X46RQɤ*nU i "]eWxdUXUf_R $Uh6 pU.-WqO'_eX. }@+ uoV؎`[Wz3*/;WU_4;6*'}g쫰_հU_ӢP*^exG|3mMF*QxWGP1Uj@H*5Rob\.KHٲpeN(\%UƕAW9afW4pnӞ Vd0VlaZU*;iO*lQZeTGe״UL)Vag*3BjYV!hcbK~qh[U? **@lRjKUlU [EUb2 X*~}ξ?U܎}t Bjwlw*VVڶ ,cVɢIhV ˚V!- U<2QB4`ȲLW#*'au 2ıj-zA v`x(B:ElRUո {fW]U<4*Bg*M*W;"EqqE0B3JE6SF5,*'Ul\ ULBUb SAd\=LJ~hƴUZ*@*[, I BȶFжyK`!+[%UOU/ø qúWBh**^JU ZVٞ*V"0g2x,M+DMxaUt*dV :usU V*.EUH45UBBlr c *U,CW^*~ R=U؍0!.*,\Ŗ1~6,1*7VյYmQcURg\`8a\ p%$*:"HQX*qetZl4SlHl+ *^@†UL[WDɺb -3R,Kj*f+UH4b& mtT*2B~exҌKJBPīxw *bļ ,^e VWa0*^rz*!̫ W_Ogռ252v A|;PU5WIW=!,]Q൅k&Vb 6QZY1e; y"fVXEʿiA?(+]*+ۡA++.RvUVDY9|YYllgjCaV)C+NRMZ!Ί7}TyZq"G#@+޳ @+bhBЊ5푀Vy 0f d ܻ Њ7,yV8J:B+.6$^Zd/7Wܟv-ZA^3*3>tMV1$Њ xQC+/ Њ[)ʳΊ`he`}@!47BJԃty;m"Њ8L`vVY!Z23FZnVE{K+9VZ7FZP [Zskiem-8F | 8s |pKʂHVƭ }{rM$,$VjiyܖVU#t@+_- E+rm^Ϩ6"FJZY*%,N]I*׾[Z! 
i}v/kǒRcYV$[+lfrV [ak{A7B*oV[JgPZ>(CX+&b|֊m eRMJV\s.k4d`蕥4b ga W_v+Tm[_aϞ++5'J}wWbP+Dcw2RuD 앱P0mP_DBRr{j=/G+U tWSz+TE_Y6ŒZ< BZwK&8#ʜ/+vL̴9W+nW?^sa bCW<2"+W\nx+^RG}WjJL+$-ʍnxe4Wꬻ޺TRJVqWSUiw%#]95❰+oⱜٕՁ]d8/vWN+\Y+0+.W<32^..= WO7 Wp/\YӍp% -!q9+^17 \1Mfp\+9bWƊH\!qe4X\1q1n~qţ>+r[\1nqe>wWt@l/XK,G+HP!WR^.i+_G/+uI;!|p+ o|E_1\у8r4m+ "|DzB2Jjcxʾ+!m9+n+i+!a>3ͮ<9*I&5Baoî@ +.74쮸k` +x4++ε_~vW>q.wE\]9k+'fW(7ď3UkTx]LP]9)>t "1BT~ 2}@Jؕ_v*r]슋Į,*fW0®ePw۳ҹ+仄\qݮ"Wl6-re)0rf1Wr`Zrf<+W]Ij͕H7+6Wcꋹ+۩sA+Kz;P6Wqt`s`+nBWF&6rT+;})ԕ,TH]Yr\6ʭG՛]qVٕӑﯻ+ɗ 2t T]IMG^cSʊ"Ų+1BݕA,+3*E 3jܕvqWRLqWI-+֘+R)ъCF6WAteh2 ]QN[]~QWp +J3/FM+j׼Z9O ߢ梮 VW yŏ7+HW\5my߫%ߦedySW{BK^,kyWr㵼 W<yeijW+D^ ҩ !2nSʒ.xeԄ^a#x+ vW*fLvm"&J#GWD ^# E/WJkiw<+l+>c`r>]<Z yWAܕ\t]9\G+:T/ w堻}M8_2zm_,5 \+ mWX X+^~vW<ԏC`wt+WpY]yDʠ"bTʠP t]!3͠+te[t)> C++7\+Y 5ͮd5 K Bؕ'6IR 2 &Wve{J鍟bW T!]2PĮtmfT\J2ʣ &)+椬X:jVWb"ꊖG|'ЈBW{+;Zt\+vt>Aj@WN-]URffteqY\tWt/ctR+_s;{?C1Wp=u I\Y'ĕ6+l&mp46ⰋUr\9oc*{Dn [J1ފB(2Jy+QQVV3–p+'#ܮ K ؂[a e~,pe5 0bspҒ^"ql W# {n\9l/r7\@eU\ѧҤEV+=Hjq}"8 bqL9+lLLE\p0,`LY\r_`_{`'̕(KFWN#+xuŚ JVWn{.+.mEJpԕmY>Wi"]iwEaWvK+#ؕJ!ܕֳ=+|| 4^I `xŊM+IÉVU$hye<>rjmwŖݕ+ηbJ&|W:^B.xF'.^6b*PJņxʮ+~^ 2ixa+'Bj,vW+^qCWnD<*M5GW< {UbX fi.wVwڙݕS'kwQvWl]YHCD+I^Y`W&׈]CJ9;^_G oaWTumӣQRUZ]YFbW:ue( uGNpԕYAWy R1,抡i+5?~̕a&F+']i෢+A]YЕz+ՕwitdA]qJOȹ`6W\ފ2c|6W0bIǔFWѕ+T˭⍫l V02r+_+BA@W]9|DJw6FW {=d35ԕ%Rʲ`uk+Ia۴!BE +zw6BWt%7 ;]q@FWB/ vjV/PWl[]!,Yv+ Brs ~+N~2a&W5Klhz{BB8'SJzmVzrIE+Cx@W+6^FWZ 9WO59L P0-]M8COJ q hzz+ YjʠM?W]kwʼnrWJR>vW\iw幸+pW\bw^q_4B7!z%+iM ݡW6v6E,WHh vW\:ez%cW+!+.0r5?2YKO+̷x@+su/΢X_9W\H/!+y,]_a> }ҭx_SJdNz8ߢ_WHX_-g92'VTqG_as+.}e)WfG_!XJ`_ jC_nWRc]Wz AJI;1b|ŏWjI4pB6U+Li\_a_d+g'}j_3:+A_qJW`lSWN r~&T,_rH+=_q4v+++Z^"W{8=ʂT^aV+$4^/ #|϶W/^92E앺u-WvwVWFަ+~ ^Gt(%- +c)Z^I% W u _ɪRmk|%C+Kd|C+; V%\b_A.)c6=?4EVϚ/|ŽʂnJ }%,I+կ_qMf ,_[ > [|eWQx||ŮO+e|!_1d|%C+ɕk2aL+_)RQ AWg|@ UW/ xAW^Np^G^>Ω핥W$ԲWv-$dYkWn}-<.x+l+`5W<+Д]J׻,}ţj+,PE_ac}CWk1cWR |}W0,[+x_WZon|dz+npW|{_ᓰW\{X#tW ^]o{B+Z+~X^a^l{d&{z+҇Cݬʰ"}6=JKpJ5օ4l\bY?uT+XU n+FL2^y;7+ xet+^Omx|{7W^YQ^PJRW*r|Jǀ%x#RcwW*sWE yL+qG+z+z=L{eQ4&W_qWL_䲿NWP௰D? Xe52ū+ Xj:%/ŎZBocS]K6 2#o:"eUTX+iFU 5W. 
---
title: "Estimating Multivariate Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Multivariate Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

In the present vignette, we want to discuss how to specify multivariate multilevel models using **brms**. We call a model *multivariate* if it contains multiple response variables, each being predicted by its own set of predictors. Consider an example from biology. Hadfield, Nutall, Osorio, and Owens (2007) analyzed data of the Eurasian blue tit (https://en.wikipedia.org/wiki/Eurasian_blue_tit). They predicted the `tarsus` length as well as the `back` color of chicks. Half of the brood was put into another `fosternest`, while the other half stayed in the fosternest of their own `dam`. This allows us to separate genetic from environmental factors. Additionally, we have information about the `hatchdate` and `sex` of the chicks (the latter being known for 94\% of the animals).
```{r data}
data("BTdata", package = "MCMCglmm")
head(BTdata)
```

## Basic Multivariate Models

We begin with a relatively simple multivariate normal model.

```{r fit1, message=FALSE, warning=FALSE, results='hide'}
fit1 <- brm(
  mvbind(tarsus, back) ~ sex + hatchdate + (1|p|fosternest) + (1|q|dam),
  data = BTdata, chains = 2, cores = 2
)
```

As can be seen in the model code, we have used `mvbind` notation to tell **brms** that both `tarsus` and `back` are separate response variables. The term `(1|p|fosternest)` indicates a varying intercept over `fosternest`. By writing `|p|` in between, we indicate that all varying effects of `fosternest` should be modeled as correlated. This makes sense since we actually have two model parts, one for `tarsus` and one for `back`. The indicator `p` is arbitrary and can be replaced by any other symbol that comes to mind (for details about the multilevel syntax of **brms**, see `help("brmsformula")` and `vignette("brms_multilevel")`). Similarly, the term `(1|q|dam)` indicates correlated varying effects of the genetic mother of the chicks. Alternatively, we could have also modeled the genetic similarities through pedigrees and corresponding relatedness matrices, but this is not the focus of this vignette (please see `vignette("brms_phylogenetics")`). The model results are readily summarized via

```{r summary1, warning=FALSE}
fit1 <- add_criterion(fit1, "loo")
summary(fit1)
```

The summary output of multivariate models closely resembles that of univariate models, except that the parameters now have the corresponding response variable as a prefix. Within dams, tarsus length and back color seem to be negatively correlated, while within fosternests the opposite is true. This indicates differential effects of genetic and environmental factors on these two characteristics. Further, the small residual correlation `rescor(tarsus, back)` at the bottom of the output indicates that there is little unmodeled dependency between tarsus length and back color. Although not necessary at this point, we have already computed and stored the LOO information criterion of `fit1`, which we will use for model comparisons. Next, let's take a look at some posterior-predictive checks, which give us a first impression of the model fit.

```{r pp_check1, message=FALSE}
pp_check(fit1, resp = "tarsus")
pp_check(fit1, resp = "back")
```

This looks pretty solid, but we notice a slight unmodeled left skewness in the distribution of `tarsus`. We will come back to this later on. Next, we want to investigate how much variation in the response variables can be explained by our model, and we use a Bayesian generalization of the $R^2$ coefficient.

```{r R2_1}
bayes_R2(fit1)
```

Clearly, there is much variation in both animal characteristics that we cannot explain, but apparently we can explain more of the variation in tarsus length than in back color.

## More Complex Multivariate Models

Now, suppose we only want to control for `sex` in `tarsus` but not in `back` and vice versa for `hatchdate`. Not that this is particularly reasonable for the present example, but it allows us to illustrate how to specify different formulas for different response variables.
We can no longer use `mvbind` syntax and so we have to use a more verbose approach:

```{r fit2, message=FALSE, warning=FALSE, results='hide'}
bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam))
bf_back <- bf(back ~ hatchdate + (1|p|fosternest) + (1|q|dam))
fit2 <- brm(bf_tarsus + bf_back, data = BTdata, chains = 2, cores = 2)
```

Note that we have literally *added* the two model parts via the `+` operator, which is in this case equivalent to writing `mvbf(bf_tarsus, bf_back)`. See `help("brmsformula")` and `help("mvbrmsformula")` for more details about this syntax. Again, we summarize the model first.

```{r summary2, warning=FALSE}
fit2 <- add_criterion(fit2, "loo")
summary(fit2)
```

Let's find out how model fit changed due to excluding certain effects from the initial model:

```{r loo12}
loo(fit1, fit2)
```

Apparently, there is no noteworthy difference in the model fit. Accordingly, we do not really need to model `sex` and `hatchdate` for both response variables, but there is also no harm in including them (so I would probably just include them).

To give you a glimpse of the capabilities of **brms**' multivariate syntax, we change our model in various directions at the same time. Remember the slight left skewness of `tarsus`, which we will now model by using the `skew_normal` family instead of the `gaussian` family. Since we no longer have a multivariate normal (or student-t) model, estimating residual correlations is no longer possible. We make this explicit using the `set_rescor` function. Further, we investigate whether the relationship of `back` and `hatchdate` is really linear, as previously assumed, by fitting a non-linear spline of `hatchdate`. On top of that, we model separate residual variances of `tarsus` for male and female chicks.

```{r fit3, message=FALSE, warning=FALSE, results='hide'}
bf_tarsus <- bf(tarsus ~ sex + (1|p|fosternest) + (1|q|dam)) +
  lf(sigma ~ 0 + sex) + skew_normal()
bf_back <- bf(back ~ s(hatchdate) + (1|p|fosternest) + (1|q|dam)) +
  gaussian()
fit3 <- brm(
  bf_tarsus + bf_back + set_rescor(FALSE),
  data = BTdata, chains = 2, cores = 2,
  control = list(adapt_delta = 0.95)
)
```

Again, we summarize the model and look at some posterior-predictive checks.

```{r summary3, warning=FALSE}
fit3 <- add_criterion(fit3, "loo")
summary(fit3)
```

We see that the (log) residual standard deviation of `tarsus` is somewhat larger for chicks whose sex could not be identified as compared to male or female chicks. Further, we see from the negative `alpha` (skewness) parameter of `tarsus` that the residuals are indeed slightly left-skewed. Lastly, running

```{r me3}
conditional_effects(fit3, "hatchdate", resp = "back")
```

reveals a non-linear relationship of `hatchdate` on the `back` color, which seems to change in waves over the course of the hatch dates.

There are many more modeling options for multivariate models, which are not discussed in this vignette. Examples include autocorrelation structures, Gaussian processes, or explicit non-linear predictors (e.g., see `help("brmsformula")` or `vignette("brms_multilevel")`). In fact, nearly all the flexibility of univariate models is retained in multivariate models.

## References

Hadfield JD, Nutall A, Osorio D, Owens IPF (2007). Testing the phenotypic gambit: phenotypic, genetic and environmental correlations of colour. *Journal of Evolutionary Biology*, 20(2), 549-557.

brms/inst/doc/brms_monotonic.html0000644000175000017500000031361414146737242017031 0ustar nileshnilesh
Estimating Monotonic Effects with brms

Estimating Monotonic Effects with brms

Paul Bürkner

2021-11-22

Introduction

This vignette is about monotonic effects, a special way of handling discrete predictors that are on an ordinal or higher scale (Bürkner & Charpentier, in review). A predictor, which we want to model as monotonic (i.e., having a monotonically increasing or decreasing relationship with the response), must either be integer valued or an ordered factor. As opposed to a continuous predictor, predictor categories (or integers) are not assumed to be equidistant with respect to their effect on the response variable. Instead, the distance between adjacent predictor categories (or integers) is estimated from the data and may vary across categories. This is realized by parameterizing as follows: One parameter, \(b\), takes care of the direction and size of the effect similar to an ordinary regression parameter. If the monotonic effect is used in a linear model, \(b\) can be interpreted as the expected average difference between two adjacent categories of the ordinal predictor. An additional parameter vector, \(\zeta\), estimates the normalized distances between consecutive predictor categories which thus defines the shape of the monotonic effect. For a single monotonic predictor, \(x\), the linear predictor term of observation \(n\) looks as follows:

\[\eta_n = b D \sum_{i = 1}^{x_n} \zeta_i\]

The parameter \(b\) can take on any real value, while \(\zeta\) is a simplex, which means that it satisfies \(\zeta_i \in [0,1]\) and \(\sum_{i = 1}^D \zeta_i = 1\) with \(D\) being the number of elements of \(\zeta\). Equivalently, \(D\) is the number of categories (or highest integer in the data) minus 1, since we start counting categories from zero to simplify the notation.
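
As a quick illustration of this parameterization (not part of the original text; the values of b and ζ below are made up for illustration), the monotonic term can be computed by hand in R for a predictor with four categories (D = 3):

b <- 15                      # direction and overall size of the effect
zeta <- c(0.65, 0.25, 0.10)  # normalized distances between adjacent categories (a simplex)
D <- length(zeta)
x <- 0:3                     # predictor categories coded as 0, ..., D
b * D * c(0, cumsum(zeta))[x + 1]
# 0.00 29.25 40.50 45.00 -- increases monotonically, with the largest jump first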

A Simple Monotonic Model

A main application of monotonic effects is ordinal predictors that can be modeled this way without falsely treating them either as continuous or as unordered categorical predictors. In Psychology, for instance, this kind of data is omnipresent in the form of Likert scale items, which are often treated as being continuous for convenience without ever testing this assumption. As an example, suppose we are interested in the relationship of yearly income (in $) and life satisfaction measured on an arbitrary scale from 0 to 100. Usually, people are not asked for their exact income. Instead, they are asked to place themselves in one of several income classes, say: ‘below 20k’, ‘between 20k and 40k’, ‘between 40k and 100k’ and ‘above 100k’. We use some simulated data for illustration purposes.

income_options <- c("below_20", "20_to_40", "40_to_100", "greater_100")
income <- factor(sample(income_options, 100, TRUE), 
                 levels = income_options, ordered = TRUE)
mean_ls <- c(30, 60, 70, 75)
ls <- mean_ls[income] + rnorm(100, sd = 7)
dat <- data.frame(income, ls)

We now proceed with analyzing the data modeling income as a monotonic effect.

fit1 <- brm(ls ~ mo(income), data = dat)

The summary methods yield

summary(fit1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    30.96      1.23    28.56    33.30 1.00     2820     2627
moincome     14.88      0.63    13.68    16.11 1.00     2492     2350

Simplex Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]     0.64      0.04     0.57     0.72 1.00     2984     2440
moincome1[2]     0.26      0.04     0.18     0.35 1.00     4214     2857
moincome1[3]     0.10      0.04     0.02     0.18 1.00     2933     2006

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.64      0.48     5.78     7.65 1.00     3299     2468

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit1, variable = "simo", regex = TRUE)

plot(conditional_effects(fit1))

The distributions of the simplex parameters of income, as shown by the plot method, demonstrate that the largest difference (about 70% of the difference between the minimum and maximum categories) is between the first two categories.

Now, let’s compare the monotonic model with two common alternative models. (a) Assume income to be continuous:

dat$income_num <- as.numeric(dat$income)
fit2 <- brm(ls ~ income_num, data = dat)
summary(fit2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ income_num 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     21.36      2.14    16.98    25.43 1.00     3668     3139
income_num    15.34      0.83    13.73    17.05 1.00     3805     3037

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     9.17      0.67     7.95    10.60 1.00     3658     2650

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

or (b) Assume income to be an unordered factor:

contrasts(dat$income) <- contr.treatment(4)
fit3 <- brm(ls ~ income, data = dat)
summary(fit3)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ income 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    30.84      1.25    28.32    33.21 1.00     3332     2959
income2      28.78      1.80    25.34    32.28 1.00     3866     3507
income3      40.53      1.84    36.96    44.04 1.00     3747     2992
income4      44.74      1.97    40.86    48.61 1.00     3670     2932

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.65      0.50     5.78     7.71 1.00     4168     2893

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We can easily compare the fit of the three models using leave-one-out cross-validation.

loo(fit1, fit2, fit3)
Output of model 'fit1':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -332.9  6.6
p_loo         4.7  0.7
looic       665.7 13.2
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -364.3  6.1
p_loo         2.7  0.4
looic       728.5 12.2
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Output of model 'fit3':

Computed from 4000 by 100 log-likelihood matrix

         Estimate   SE
elpd_loo   -333.0  6.6
p_loo         4.8  0.7
looic       666.0 13.2
------
Monte Carlo SE of elpd_loo is 0.0.

All Pareto k estimates are good (k < 0.5).
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit1   0.0       0.0  
fit3  -0.1       0.1  
fit2 -31.4       5.9  

The monotonic model fits better than the continuous model, which is not surprising given that the relationship between income and ls is non-linear. The monotonic and the unordered factor model have almost identical fit in this example, but this may not be the case for other data sets.

Setting Prior Distributions

In the previous monotonic model, we have implicitly assumed that all differences between adjacent categories were a-priori the same, or, formulated correctly, had the same prior distribution. In the following, we want to show how to change this assumption. The canonical prior distribution of a simplex parameter is the Dirichlet distribution, a multivariate generalization of the beta distribution. It is non-zero for all valid simplexes (i.e., \(\zeta_i \in [0,1]\) and \(\sum_{i = 1}^D \zeta_i = 1\)) and zero otherwise. The Dirichlet prior has a single parameter \(\alpha\) of the same length as \(\zeta\). The higher \(\alpha_i\), the higher the a-priori probability of higher values of \(\zeta_i\). Suppose that, before looking at the data, we expected that the same amount of additional money matters more for people who generally have less money. This translates into higher a-priori values of \(\zeta_1\) (the difference between ‘below_20’ and ‘20_to_40’) and hence into higher values of \(\alpha_1\). We choose \(\alpha_1 = 2\) and \(\alpha_2 = \alpha_3 = 1\), the latter being the default value of \(\alpha\). To fit the model, we write:

prior4 <- prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome1")
fit4 <- brm(ls ~ mo(income), data = dat,
            prior = prior4, sample_prior = TRUE)

The 1 at the end of "moincome1" may appear strange when first working with monotonic effects. However, it is necessary as one monotonic term may be associated with multiple simplex parameters, if interactions of multiple monotonic variables are included in the model.
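
For instance, if the model additionally contained the interaction with age fitted later in this vignette, that term would get its own simplex parameter, and a separate Dirichlet prior could be placed on it in the same way (this line is a sketch, assuming the coefficient is named "moincome:age1" in line with the naming used in the summaries below):

prior(dirichlet(c(2, 1, 1)), class = "simo", coef = "moincome:age1")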

summary(fit4)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    30.92      1.27    28.40    33.47 1.00     2586     2174
moincome     14.86      0.65    13.63    16.15 1.00     2287     2161

Simplex Parameters: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]     0.65      0.04     0.57     0.72 1.00     3057     2558
moincome1[2]     0.26      0.04     0.18     0.35 1.00     3869     2718
moincome1[3]     0.09      0.04     0.02     0.17 1.00     2553     1739

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.65      0.49     5.77     7.67 1.00     3445     2818

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We have used sample_prior = TRUE to also obtain draws from the prior distribution of simo_moincome1 so that we can visualize it.

plot(fit4, variable = "prior_simo", regex = TRUE, N = 3)

As is visible in the plots, simo_moincome1[1] was a-priori on average twice as high as simo_moincome1[2] and simo_moincome1[3] as a result of setting \(\alpha_1\) to 2.

Modeling interactions of monotonic variables

Suppose we have additionally asked participants for their age.

dat$age <- rnorm(100, mean = 40, sd = 10)

We are not only interested in the main effect of age but also in the interaction of income and age. Interactions with monotonic variables can be specified in the usual way using the * operator:

fit5 <- brm(ls ~ mo(income)*age, data = dat)
summary(fit5)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) * age 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       35.27      5.35    22.95    44.47 1.00      930     1368
age             -0.11      0.13    -0.34     0.19 1.00      949     1292
moincome        15.61      2.72    10.88    21.39 1.00      713     1459
moincome:age    -0.02      0.07    -0.17     0.10 1.00      710     1414

Simplex Parameters: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]         0.65      0.06     0.54     0.81 1.00     1646     1065
moincome1[2]         0.26      0.06     0.13     0.37 1.00     2202     1090
moincome1[3]         0.09      0.05     0.01     0.18 1.00     1604     1292
moincome:age1[1]     0.40      0.27     0.02     0.90 1.00     1282     2237
moincome:age1[2]     0.32      0.23     0.02     0.82 1.00     1907     2383
moincome:age1[3]     0.28      0.22     0.01     0.78 1.00     1615     1865

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.50      0.47     5.67     7.47 1.00     2664     2826

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
conditional_effects(fit5, "income:age")

Modelling Monotonic Group-Level Effects

Suppose that the 100 people in our sample data were drawn from 10 different cities; 10 people per city. Thus, we add an identifier for city to the data and add some city-related variation to ls.

dat$city <- rep(1:10, each = 10)
var_city <- rnorm(10, sd = 10)
dat$ls <- dat$ls + var_city[dat$city]

With the following code, we fit a multilevel model assuming the intercept and the effect of income to vary by city:

fit6 <- brm(ls ~ mo(income)*age + (mo(income) | city), data = dat)
summary(fit6)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: ls ~ mo(income) * age + (mo(income) | city) 
   Data: dat (Number of observations: 100) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~city (Number of levels: 10) 
                        Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)               6.65      2.31     3.16    12.22 1.00     1544     2157
sd(moincome)                0.75      0.62     0.03     2.31 1.00     1781     1820
cor(Intercept,moincome)     0.12      0.55    -0.89     0.96 1.00     4133     2584

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       37.84      6.12    24.85    48.35 1.00     1375     1954
age             -0.08      0.14    -0.33     0.24 1.00     1287     2246
moincome        16.38      2.99    11.08    22.51 1.00     1090     1066
moincome:age    -0.04      0.08    -0.20     0.10 1.00     1076     1085

Simplex Parameters: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
moincome1[1]         0.64      0.06     0.53     0.77 1.00     1742      971
moincome1[2]         0.27      0.06     0.16     0.38 1.00     2359     1127
moincome1[3]         0.09      0.05     0.01     0.19 1.00     2424     1679
moincome:age1[1]     0.44      0.27     0.02     0.91 1.00     1543     2292
moincome:age1[2]     0.30      0.22     0.01     0.80 1.00     2620     2964
moincome:age1[3]     0.27      0.21     0.01     0.78 1.00     2874     2761

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     6.59      0.51     5.65     7.72 1.00     4172     2565

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

reveals that the effect of income varies only slightly across cities. For the present data, this is not overly surprising given that, in the data simulations, we assumed income to have the same effect across cities.

References

Bürkner P. C. & Charpentier, E. (in review). Monotonic Effects: A Principled Approach for Including Ordinal Predictors in Regression Models. PsyArXiv preprint.

brms/inst/doc/brms_families.Rmd0000644000175000017500000003324314111751670016361 0ustar nileshnilesh
---
title: "Parameterization of Response Distributions in brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Parameterization of Response Distributions in brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see `vignette("brms_overview")`.

## Notation

Throughout this vignette, we denote values of the response variable as $y$, a density function as $f$, and use $\mu$ to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, $\mu$ is not estimated directly but computed as $\mu = g(\eta)$, where $\eta$ is a predictor term (see `help(brmsformula)` for details) and $g$ is the response function (i.e., inverse of the link function).

## Location shift models

The density of the **gaussian** family is given by
$$
f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right)
$$
where $\sigma$ is the residual standard deviation.

The density of the **student** family is given by
$$
f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2}
$$
$\Gamma$ denotes the gamma function and $\nu > 1$ are the degrees of freedom. As $\nu \rightarrow \infty$, the student distribution becomes the gaussian distribution.

The density of the **skew_normal** family is given by
$$
f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right)
$$
where $\xi$ is the location parameter, $\omega$ is the positive scale parameter, $\alpha$ the skewness parameter, and $\text{erf}$ denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean $\mu$ and standard deviation $\sigma$, $\omega$ and $\xi$ are computed as
$$
\omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}}
$$
$$
\xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}}
$$
If $\alpha = 0$, the skew-normal distribution becomes the gaussian distribution. For location shift models, $y$ can be any real value.

## Binary and count data models

The density of the **binomial** family is given by
$$
f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y}
$$
where $N$ is the number of trials and $y \in \{0, ... , N\}$. When all $N$ are $1$ (i.e., $y \in \{0,1\}$), the **bernoulli** distribution for binary data arises.

For $y \in \mathbb{N}_0$, the density of the **poisson** family is given by
$$
f(y) = \frac{\mu^{y}}{y!} \exp(-\mu)
$$
The density of the **negbinomial** (negative binomial) family is
$$
f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi
$$
where $\phi$ is a positive precision parameter. For $\phi \rightarrow \infty$, the negative binomial distribution becomes the poisson distribution. The density of the **geometric** family arises if $\phi$ is set to $1$.
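
As a brief, hypothetical illustration of how such a count family is selected in practice (this example is not part of the original vignette and uses simulated data), a negative binomial model can be fitted by passing `family = negbinomial()` to `brm()`:

```{r, eval=FALSE}
# hypothetical example with simulated count data (not from the original text)
library(brms)
dat <- data.frame(x = rnorm(200))
dat$y <- rnbinom(200, mu = exp(1 + 0.5 * dat$x), size = 2)
# log is the default link for mu in the negbinomial family
fit_nb <- brm(y ~ x, data = dat, family = negbinomial())
summary(fit_nb)
```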
## Time-to-event models

With time-to-event models we mean all models that are defined on the positive reals only, that is $y \in \mathbb{R}^+$.

The density of the **lognormal** family is given by
$$
f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right)
$$
where $\sigma$ is the residual standard deviation on the log-scale.

The density of the **Gamma** family is given by
$$
f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right)
$$
where $\alpha$ is a positive shape parameter.

The density of the **weibull** family is given by
$$
f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right)
$$
where $\alpha$ is again a positive shape parameter and $s = \mu / \Gamma(1 + 1 / \alpha)$ is the scale parameter so that $\mu$ is the mean of the distribution. The **exponential** family arises if $\alpha$ is set to $1$ for either the gamma or Weibull distribution.

The density of the **inverse.gaussian** family is given by
$$
f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right)
$$
where $\alpha$ is a positive shape parameter.

The **cox** family implements the Cox proportional hazards model, which assumes a hazard function of the form $h(y) = h_0(y) \mu$ with baseline hazard $h_0(y)$ expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by
$$
f(y) = h(y) S(y)
$$
where $S(y)$ is the survival function implied by $h(y)$.

## Extreme value models

Modeling extremes requires special distributions. One may use the **weibull** distribution (see above) or the **frechet** distribution with density
$$
f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right)
$$
where $s = \mu / \Gamma(1 - 1 / \nu)$ is a positive scale parameter and $\nu > 1$ is a shape parameter so that $\mu$ predicts the mean of the Frechet distribution.

A generalization of both distributions is the generalized extreme value distribution (family **gen_extreme_value**) with density
$$
f(y) = \frac{1}{\sigma} t(y)^{-1 - 1 / \xi} \exp(-t(y))
$$
where
$$
t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi}
$$
with positive scale parameter $\sigma$ and shape parameter $\xi$.

## Response time models

One family that is especially suited to model reaction times is the **exgaussian** ('exponentially modified Gaussian') family. Its density is given by
$$
f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right)
$$
where $\beta$ is the scale (inverse rate) of the exponential component, $\xi$ is the mean of the Gaussian component, $\sigma$ is the standard deviation of the Gaussian component, and $\text{erfc}$ is the complementary error function. We parameterize $\mu = \xi + \beta$ so that the main predictor term equals the mean of the distribution.

Another family well suited for modeling response times is the **shifted_lognormal** distribution. Its density equals that of the **lognormal** distribution except that the whole distribution is shifted to the right by a positive parameter called *ndt* (for consistency with the **wiener** diffusion model explained below).

A family concerned with the combined modeling of reaction times and corresponding binary responses is the **wiener** diffusion model.
It has four model parameters, each with a natural interpretation. The parameter $\alpha > 0$ describes the separation between two boundaries of the diffusion process, $\tau > 0$ describes the non-decision time (e.g., due to image or motor processing), $\beta \in [0, 1]$ describes the initial bias in favor of the upper alternative, and $\delta \in \mathbb{R}$ describes the drift rate to the boundaries (a positive value indicates a drift towards the upper boundary). The density for the reaction time at the upper boundary is given by
$$
f(y) = \frac{\alpha}{(y-\tau)^{3/2}} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right)
$$
where $\phi(x)$ denotes the standard normal density function. The density at the lower boundary can be obtained by substituting $1 - \beta$ for $\beta$ and $-\delta$ for $\delta$ in the above equation.

In brms, the parameters $\alpha$, $\tau$, and $\beta$ are modeled as auxiliary parameters named *bs* ('boundary separation'), *ndt* ('non-decision time'), and *bias* respectively, whereas the drift rate $\delta$ is modeled via the ordinary model formula, that is, as $\delta = \mu$.

## Quantile regression

Quantile regression is implemented via family **asym_laplace** (asymmetric Laplace distribution) with density
$$
f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right)
$$
where $\rho_p$ is given by $\rho_p(x) = x (p - I_{x < 0})$ and $I_A$ is the indicator function of set $A$. The parameter $\sigma$ is a positive scale parameter and $p$ is the *quantile* parameter taking on values in $(0, 1)$. For this distribution, we have $P(Y < g(\eta)) = p$. Thus, quantile regression can be performed by fixing $p$ to the quantile of interest.

## Probability models

The density of the **Beta** family for $y \in (0,1)$ is given by
$$
f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)}
$$
where $B$ is the beta function and $\phi$ is a positive precision parameter.

A multivariate generalization of the **Beta** family is the **dirichlet** family with density
$$
f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}.
$$
The **dirichlet** distribution is only implemented with the multivariate logit link function so that
$$
\mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})}
$$
For reasons of identifiability, $\eta_{1}$ is set to $0$.

## Circular models

The density of the **von_mises** family for $y \in (-\pi,\pi)$ is given by
$$
f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)}
$$
where $I_0$ is the modified Bessel function of order 0 and $\kappa$ is a positive precision parameter.

## Ordinal and categorical models

For ordinal and categorical models, $y$ is one of the categories $1, ..., K$. The intercepts of ordinal models are called thresholds and are denoted as $\tau_k$, with $k \in \{1, ..., K-1\}$, whereas $\eta$ does not contain a fixed effects intercept. Note that the applied link functions $h$ are technically distribution functions $\mathbb{R} \rightarrow [0,1]$.
The density of the **cumulative** family (implementing the most basic ordinal model) is given by $$ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) $$ The densities of the **sratio** (stopping ratio) and **cratio** (continuation ratio) families are given by $$ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) $$ and $$ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) $$ respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the **acat** (adjacent category) family is given by $$ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} $$ For the logit link, this can be simplified to $$ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} $$ The linear predictor $\eta$ can be generalized to also depend on the category $k$ for a subset of predictors. This leads to category specific effects (for details on how to specify them see `help(brm)`). Note that **cumulative** and **sratio** models use $\tau - \eta$, whereas **cratio** and **acat** use $\eta - \tau$. This is done to ensure that larger values of $\eta$ increase the probability of *higher* response categories. The **categorical** family is currently only implemented with the multivariate logit link function and has density $$ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} $$ Note that $\eta$ does also depend on the category $k$. For reasons of identifiability, $\eta_{1}$ is set to $0$. A generalization of the **categorical** family to more than one trial is the **multinomial** family with density $$ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} $$ where, for each category, $\mu_{k}$ is estimated via the multivariate logit link function shown above. ## Zero-inflated and hurdle models **Zero-inflated** and **hurdle** families extend existing families by adding special processes for responses that are zero. The density of a **zero-inflated** family is given by $$ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 $$ where $z$ denotes the zero-inflation probability. Currently implemented families are **zero_inflated_poisson**, **zero_inflated_binomial**, **zero_inflated_negbinomial**, and **zero_inflated_beta**. The density of a **hurdle** family is given by $$ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 $$ Currently implemented families are **hurdle_poisson**, **hurdle_negbinomial**, **hurdle_gamma**, and **hurdle_lognormal**. The density of a **zero-one-inflated** family is given by $$ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} $$ where $\alpha$ is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and $\gamma$ is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are **zero_one_inflated_beta**. 
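As a minimal illustration of how such families are selected in practice, consider the following sketch. It assumes a hypothetical data frame `d` with a count response `y` and a predictor `x`; the zero-inflation and hurdle probabilities are modeled via the distributional parameters `zi` and `hu` (the latter name is assumed here and should be checked against `help(brmsformula)`).

```r
library(brms)
# Zero-inflated Poisson model with its own predictor for the zero-inflation part
fit_zi <- brm(bf(y ~ x, zi ~ x), data = d, family = zero_inflated_poisson())
# Analogous hurdle model (hurdle probability assumed to be named 'hu')
fit_hu <- brm(bf(y ~ x, hu ~ x), data = d, family = hurdle_poisson())
```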
brms/inst/doc/brms_missings.Rmd0000644000175000017500000002476714111751670016437 0ustar nileshnilesh--- title: "Handle Missing Values with brms" author: "Paul Bürkner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: yes vignette: > %\VignetteIndexEntry{Handle Missing Values with brms} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r, SETTINGS-knitr, include=FALSE} stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ``` ## Introduction Many real world data sets contain missing values for various reasons. Generally, we have quite a few options to handle those missing values. The easiest solution is to remove all rows from the data set, where one or more variables are missing. However, if values are not missing completely at random, this will likely lead to bias in our analysis. Accordingly, we usually want to impute missing values in one way or the other. Here, we will consider two very general approaches using **brms**: (1) Impute missing values *before* the model fitting with multiple imputation, and (2) impute missing values on the fly *during* model fitting[^1]. As a simple example, we will use the `nhanes` data set, which contains information on participants' `age`, `bmi` (body mass index), `hyp` (hypertensive), and `chl` (total serum cholesterol). For the purpose of the present vignette, we are primarily interested in predicting `bmi` by `age` and `chl`. ```{r} data("nhanes", package = "mice") head(nhanes) ``` ## Imputation before model fitting There are many approaches allowing us to impute missing data before the actual model fitting takes place. From a statistical perspective, multiple imputation is one of the best solutions. Each missing value is not imputed once but `m` times leading to a total of `m` fully imputed data sets. The model can then be fitted to each of those data sets separately and results are pooled across models, afterwards. One widely applied package for multiple imputation is **mice** (Buuren & Groothuis-Oudshoorn, 2010) and we will use it in the following in combination with **brms**. Here, we apply the default settings of **mice**, which means that all variables will be used to impute missing values in all other variables and imputation functions automatically chosen based on the variables' characteristics. ```{r} library(mice) imp <- mice(nhanes, m = 5, print = FALSE) ``` Now, we have `m = 5` imputed data sets stored within the `imp` object. In practice, we will likely need more than `5` of those to accurately account for the uncertainty induced by the missingness, perhaps even in the area of `100` imputed data sets (Zhou & Reiter, 2010). Of course, this increases the computational burden by a lot and so we stick to `m = 5` for the purpose of this vignette. Regardless of the value of `m`, we can either extract those data sets and then pass them to the actual model fitting function as a list of data frames, or pass `imp` directly. The latter works because **brms** offers special support for data imputed by **mice**. We will go with the latter approach, since it is less typing. Fitting our model of interest with **brms** to the multiple imputed data sets is straightforward. 
```{r, results = 'hide', message = FALSE}
fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2)
```

The returned fitted model is an ordinary `brmsfit` object containing the posterior draws of all `m` submodels. While pooling across models is not necessarily straightforward in classical statistics, it is trivial in a Bayesian framework. Here, pooling results of multiple imputed data sets is simply achieved by combining the posterior draws of the submodels. Accordingly, all post-processing methods can be used out of the box without having to worry about pooling at all.

```{r}
summary(fit_imp1)
```

In the summary output, we notice that some `Rhat` values are higher than $1.1$, indicating possible convergence problems. For models based on multiple imputed data sets, this is often a **false positive**: Chains of different submodels may not overlay each other exactly, since they were fitted to different data. We can see the chains on the right-hand side of the following plot.

```{r}
plot(fit_imp1, variable = "^b", regex = TRUE)
```

Such non-overlaying chains imply high `Rhat` values without there actually being any convergence issue. Accordingly, we have to investigate the convergence of the submodels separately, which we can do by looking at

```{r}
round(fit_imp1$rhats, 2)
```

The convergence of each of the submodels looks good. Accordingly, we can proceed with further post-processing and interpretation of the results. For instance, we could investigate the combined effect of `age` and `chl`.

```{r}
conditional_effects(fit_imp1, "age:chl")
```

To summarize, the advantages of multiple imputation are obvious: One can apply it to all kinds of models, since model fitting functions do not need to know that the data sets were imputed beforehand. Also, we do not need to worry about pooling across submodels when using fully Bayesian methods. The only drawback is the amount of time required for model fitting. Estimating Bayesian models is already quite slow with just a single data set and it only gets worse when working with multiple imputation.

### Compatibility with other multiple imputation packages

**brms** offers built-in support for **mice** mainly because I use the latter in some of my own research projects. Nevertheless, `brm_multiple` supports all kinds of multiple imputation packages as it also accepts a *list* of data frames as input for its `data` argument. Thus, you just need to extract the imputed data frames in the form of a list, which can then be passed to `brm_multiple`. Most multiple imputation packages have some built-in functionality for this task. When using the **mi** package, for instance, you simply need to call the `mi::complete` function to get the desired output.

## Imputation during model fitting

Imputation during model fitting is generally thought to be more complex than imputation before model fitting, because one has to take care of everything within one step. This remains true when imputing missing values with **brms**, but possibly to a somewhat smaller degree. Consider again the `nhanes` data with the goal to predict `bmi` by `age` and `chl`. Since `age` contains no missing values, we only have to take special care of `bmi` and `chl`. We need to tell the model two things. (1) Which variables contain missing values and how they should be predicted, as well as (2) which of these imputed variables should be used as predictors.
In **brms** we can do this as follows:

```{r, results = 'hide', message = FALSE}
bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi() ~ age) + set_rescor(FALSE)
fit_imp2 <- brm(bform, data = nhanes)
```

The model has become multivariate, as we no longer only predict `bmi` but also `chl` (see `vignette("brms_multivariate")` for details about the multivariate syntax of **brms**). We ensure that missings in both variables will be modeled rather than excluded by adding `| mi()` on the left-hand side of the formulas[^2]. We write `mi(chl)` on the right-hand side of the formula for `bmi` to ensure that the estimated missing values of `chl` will be used in the prediction of `bmi`. The summary is a bit more cluttered as we get coefficients for both response variables, but apart from that we can interpret coefficients in the usual way.

```{r}
summary(fit_imp2)
conditional_effects(fit_imp2, "age:chl", resp = "bmi")
```

The results look pretty similar to those obtained from multiple imputation, but be aware that this may not be generally the case. In multiple imputation, the default is to impute all variables based on all other variables, while in the 'one-step' approach, we have to explicitly specify the variables used in the imputation. Thus, arguably, multiple imputation is easier to apply. An obvious advantage of the 'one-step' approach is that the model needs to be fitted only once instead of `m` times. Also, within the **brms** framework, we can use multilevel structure and complex non-linear relationships for the imputation of missing values, which is not achieved as easily in standard multiple imputation software. On the downside, it is currently not possible to impute discrete variables, because **Stan** (the engine behind **brms**) does not allow estimating discrete parameters.

### Combining measurement error and missing values

Missing value terms in **brms** can not only handle missing values but also measurement error, or arbitrary combinations of the two. In fact, we can think of a missing value as a value with infinite measurement error. Thus, `mi` terms are a natural (and somewhat more verbose) generalization of the now soft deprecated `me` terms. Suppose we had measured the variable `chl` with some known error:

```{r}
nhanes$se <- rexp(nrow(nhanes), 2)
```

Then we can go ahead and include this information into the model as follows:

```{r, results = 'hide', message = FALSE, eval = FALSE}
bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi(se) ~ age) + set_rescor(FALSE)
fit_imp3 <- brm(bform, data = nhanes)
```

Summarizing and post-processing the model continues to work as usual.

[^1]: Actually, there is a third approach that only applies to missings in response variables. If we want to impute missing responses, we just fit the model using the observed responses and then impute the missings *after* fitting the model by means of posterior prediction. That is, we supply the predictor values corresponding to missing responses to the `predict` method.

[^2]: We don't really need this for `bmi`, since `bmi` is not used as a predictor for another variable. Accordingly, we could also -- and equivalently -- impute missing values of `bmi` *after* model fitting by means of posterior prediction.

## References

Buuren, S. V. & Groothuis-Oudshoorn, K. (2010). mice: Multivariate imputation by chained equations in R. *Journal of Statistical Software*, 1-68. doi.org/10.18637/jss.v045.i03

Zhou, X. & Reiter, J. P. (2010). A Note on Bayesian Inference After Multiple Imputation.
*The American Statistician*, 64(2), 159-163. doi.org/10.1198/tast.2010.09109 brms/inst/doc/brms_nonlinear.R0000644000175000017500000001053014146742367016241 0ustar nileshnileshparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- b <- c(2, 0.75) x <- rnorm(100) y <- rnorm(100, mean = b[1] * exp(b[2] * x)) dat1 <- data.frame(x, y) ## ---- results='hide'-------------------------------------------------------------------- prior1 <- prior(normal(1, 2), nlpar = "b1") + prior(normal(0, 2), nlpar = "b2") fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE), data = dat1, prior = prior1) ## --------------------------------------------------------------------------------------- summary(fit1) plot(fit1) plot(conditional_effects(fit1), points = TRUE) ## ---- results='hide'-------------------------------------------------------------------- fit2 <- brm(y ~ x, data = dat1) ## --------------------------------------------------------------------------------------- summary(fit2) ## --------------------------------------------------------------------------------------- pp_check(fit1) pp_check(fit2) ## --------------------------------------------------------------------------------------- loo(fit1, fit2) ## --------------------------------------------------------------------------------------- data(loss) head(loss) ## ---- results='hide'-------------------------------------------------------------------- fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) ## --------------------------------------------------------------------------------------- summary(fit_loss) plot(fit_loss, N = 3, ask = FALSE) conditional_effects(fit_loss) ## --------------------------------------------------------------------------------------- conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) ## --------------------------------------------------------------------------------------- inv_logit <- function(x) 1 / (1 + exp(-x)) ability <- rnorm(300) p <- 0.33 + 0.67 * inv_logit(ability) answer <- ifelse(runif(300, 0, 1) < p, 1, 0) dat_ir <- data.frame(ability, answer) ## ---- results='hide'-------------------------------------------------------------------- fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli()) ## --------------------------------------------------------------------------------------- summary(fit_ir1) plot(conditional_effects(fit_ir1), points = TRUE) ## ---- results='hide'-------------------------------------------------------------------- fit_ir2 <- brm( bf(answer ~ 0.33 + 0.67 * inv_logit(eta), eta ~ ability, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = prior(normal(0, 5), 
nlpar = "eta") ) ## --------------------------------------------------------------------------------------- summary(fit_ir2) plot(conditional_effects(fit_ir2), points = TRUE) ## --------------------------------------------------------------------------------------- loo(fit_ir1, fit_ir2) ## ---- results='hide'-------------------------------------------------------------------- fit_ir3 <- brm( bf(answer ~ guess + (1 - guess) * inv_logit(eta), eta ~ 0 + ability, guess ~ 1, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = c( prior(normal(0, 5), nlpar = "eta"), prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1) ) ) ## --------------------------------------------------------------------------------------- summary(fit_ir3) plot(fit_ir3) plot(conditional_effects(fit_ir3), points = TRUE) brms/inst/doc/brms_families.html0000644000175000017500000006663314146736037016624 0ustar nileshnilesh Parameterization of Response Distributions in brms

Parameterization of Response Distributions in brms

Paul Bürkner

2021-11-22

The purpose of this vignette is to discuss the parameterizations of the families (i.e., response distributions) used in brms. For a more general overview of the package see vignette("brms_overview").

Notation

Throughout this vignette, we denote values of the response variable as \(y\), a density function as \(f\), and use \(\mu\) to refer to the main model parameter, which is usually the mean of the response distribution or some closely related quantity. In a regression framework, \(\mu\) is not estimated directly but computed as \(\mu = g(\eta)\), where \(\eta\) is a predictor term (see help(brmsformula) for details) and \(g\) is the response function (i.e., inverse of the link function).

Location shift models

The density of the gaussian family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\frac{1}{2}\left(\frac{y - \mu}{\sigma}\right)^2\right) \]

where \(\sigma\) is the residual standard deviation. The density of the student family is given by \[ f(y) = \frac{\Gamma((\nu + 1)/2)}{\Gamma(\nu/2)} \frac{1}{\sqrt{\nu\pi}\sigma}\left(1 + \frac{1}{\nu} \left(\frac{y - \mu}{\sigma}\right)^2\right)^{-(\nu+1)/2} \]

\(\Gamma\) denotes the gamma function and \(\nu > 1\) are the degrees of freedom. As \(\nu \rightarrow \infty\), the student distribution becomes the gaussian distribution. The density of the skew_normal family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\omega} \exp\left(-\frac{1}{2} \left(\frac{y - \xi}{\omega}\right)^2 \right) \left(1 + \text{erf} \left( \alpha \left(\frac{y - \xi}{\omega \sqrt{2}} \right) \right) \right) \]

where \(\xi\) is the location parameter, \(\omega\) is the positive scale parameter, \(\alpha\) the skewness parameter, and \(\text{erf}\) denotes the error function of the gaussian distribution. To parameterize the skew-normal distribution in terms of the mean \(\mu\) and standard deviation \(\sigma\), \(\omega\) and \(\xi\) are computed as \[ \omega = \frac{\sigma}{\sqrt{1 - \frac{2}{\pi} \frac{\alpha^2}{1 + \alpha^2}}} \]

\[ \xi = \mu - \omega \frac{\alpha}{\sqrt{1 + \alpha^2}} \sqrt{\frac{2}{\pi}} \]

If \(\alpha = 0\), the skew-normal distribution becomes the gaussian distribution. For location shift models, \(y\) can be any real value.
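This parameterization can be verified numerically. The following base-R sketch (not brms code) recovers \(\mu\) from \(\xi\), \(\omega\), and \(\alpha\) using the known skew-normal mean \(\xi + \omega \delta \sqrt{2/\pi}\) with \(\delta = \alpha / \sqrt{1 + \alpha^2}\):
mu <- 2; sigma <- 1.5; alpha <- 4
omega <- sigma / sqrt(1 - 2 / pi * alpha^2 / (1 + alpha^2))
xi <- mu - omega * alpha / sqrt(1 + alpha^2) * sqrt(2 / pi)
delta <- alpha / sqrt(1 + alpha^2)
xi + omega * delta * sqrt(2 / pi)  # recovers mu = 2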

Binary and count data models

The density of the binomial family is given by \[ f(y) = {N \choose y} \mu^{y} (1-\mu)^{N - y} \] where \(N\) is the number of trials and \(y \in \{0, ... , N\}\). When all \(N\) are \(1\) (i.e., \(y \in \{0,1\}\)), the bernoulli distribution for binary data arises.

For \(y \in \mathbb{N}_0\), the density of the poisson family is given by \[ f(y) = \frac{\mu^{y}}{y!} \exp(-\mu) \] The density of the negbinomial (negative binomial) family is \[ f(y) = {y + \phi - 1 \choose y} \left(\frac{\mu}{\mu + \phi}\right)^{y} \left(\frac{\phi}{\mu + \phi}\right)^\phi \] where \(\phi\) is a positive precision parameter. For \(\phi \rightarrow \infty\), the negative binomial distribution becomes the poisson distribution. The density of the geometric family arises if \(\phi\) is set to \(1\).
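The limiting behavior can be checked numerically in base R: with a very large \(\phi\) (the size argument of dnbinom), the negative binomial probabilities are practically indistinguishable from the Poisson probabilities.
dnbinom(3, mu = 2, size = 1e6)  # negative binomial with huge precision phi
dpois(3, lambda = 2)            # essentially the same value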

Time-to-event models

With time-to-event models we mean all models that are defined on the positive reals only, that is \(y \in \mathbb{R}^+\). The density of the lognormal family is given by \[ f(y) = \frac{1}{\sqrt{2\pi}\sigma y} \exp\left(-\frac{1}{2}\left(\frac{\log(y) - \mu}{\sigma}\right)^2\right) \] where \(\sigma\) is the residual standard deviation on the log-scale. The density of the Gamma family is given by \[ f(y) = \frac{(\alpha / \mu)^\alpha}{\Gamma(\alpha)} y^{\alpha-1} \exp\left(-\frac{\alpha y}{\mu}\right) \] where \(\alpha\) is a positive shape parameter. The density of the weibull family is given by \[ f(y) = \frac{\alpha}{s} \left(\frac{y}{s}\right)^{\alpha-1} \exp\left(-\left(\frac{y}{s}\right)^\alpha\right) \] where \(\alpha\) is again a positive shape parameter and \(s = \mu / \Gamma(1 + 1 / \alpha)\) is the scale parameter so that \(\mu\) is the mean of the distribution. The exponential family arises if \(\alpha\) is set to \(1\) for either the gamma or Weibull distribution. The density of the inverse.gaussian family is given by \[ f(y) = \left(\frac{\alpha}{2 \pi y^3}\right)^{1/2} \exp \left(\frac{-\alpha (y - \mu)^2}{2 \mu^2 y} \right) \] where \(\alpha\) is a positive shape parameter. The cox family implements the Cox proportional hazards model, which assumes a hazard function of the form \(h(y) = h_0(y) \mu\) with baseline hazard \(h_0(y)\) expressed via M-splines (which integrate to I-splines) in order to ensure monotonicity. The density of the cox model is then given by \[ f(y) = h(y) S(y) \] where \(S(y)\) is the survival function implied by \(h(y)\).
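As a quick numerical check of the weibull parameterization (base R, not brms code), setting the scale to \(s = \mu / \Gamma(1 + 1/\alpha)\) indeed yields a distribution with mean \(\mu\):
mu <- 3; alpha <- 2
s <- mu / gamma(1 + 1 / alpha)
integrate(function(y) y * dweibull(y, shape = alpha, scale = s), 0, Inf)$value  # approximately 3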

Extreme value models

Modeling extremes requires special distributions. One may use the weibull distribution (see above) or the frechet distribution with density \[ f(y) = \frac{\nu}{s} \left(\frac{y}{s}\right)^{-1-\nu} \exp\left(-\left(\frac{y}{s}\right)^{-\nu}\right) \] where \(s = \mu / \Gamma(1 - 1 / \nu)\) is a positive scale parameter and \(\nu > 1\) is a shape parameter so that \(\mu\) predicts the mean of the Frechet distribution. A generalization of both distributions is the generalized extreme value distribution (family gen_extreme_value) with density \[ f(y) = \frac{1}{\sigma} t(y)^{-1 - 1 / \xi} \exp(-t(y)) \] where \[ t(y) = \left(1 + \xi \left(\frac{y - \mu}{\sigma} \right)\right)^{-1 / \xi} \] with positive scale parameter \(\sigma\) and shape parameter \(\xi\).

Response time models

One family that is especially suited to model reaction times is the exgaussian (‘exponentially modified Gaussian’) family. Its density is given by

\[ f(y) = \frac{1}{2 \beta} \exp\left(\frac{1}{2 \beta} \left(2\xi + \sigma^2 / \beta - 2 y \right) \right) \text{erfc}\left(\frac{\xi + \sigma^2 / \beta - y}{\sqrt{2} \sigma} \right) \] where \(\beta\) is the scale (inverse rate) of the exponential component, \(\xi\) is the mean of the Gaussian component, \(\sigma\) is the standard deviation of the Gaussian component, and \(\text{erfc}\) is the complementary error function. We parameterize \(\mu = \xi + \beta\) so that the main predictor term equals the mean of the distribution.

Another family well suited for modeling response times is the shifted_lognormal distribution. Its density equals that of the lognormal distribution except that the whole distribution is shifted to the right by a positive parameter called ndt (for consistency with the wiener diffusion model explained below).

A family concerned with the combined modeling of reaction times and corresponding binary responses is the wiener diffusion model. It has four model parameters, each with a natural interpretation. The parameter \(\alpha > 0\) describes the separation between two boundaries of the diffusion process, \(\tau > 0\) describes the non-decision time (e.g., due to image or motor processing), \(\beta \in [0, 1]\) describes the initial bias in favor of the upper alternative, and \(\delta \in \mathbb{R}\) describes the drift rate to the boundaries (a positive value indicates a drift towards the upper boundary). The density for the reaction time at the upper boundary is given by

\[ f(y) = \frac{\alpha}{(y-\tau)^{3/2}} \exp \! \left(- \delta \alpha \beta - \frac{\delta^2(y-\tau)}{2}\right) \sum_{k = - \infty}^{\infty} (2k + \beta) \phi \! \left(\frac{2k + \alpha \beta}{\sqrt{y - \tau}}\right) \]

where \(\phi(x)\) denotes the standard normal density function. The density at the lower boundary can be obtained by substituting \(1 - \beta\) for \(\beta\) and \(-\delta\) for \(\delta\) in the above equation. In brms, the parameters \(\alpha\), \(\tau\), and \(\beta\) are modeled as auxiliary parameters named bs (‘boundary separation’), ndt (‘non-decision time’), and bias, respectively, whereas the drift rate \(\delta\) is modeled via the ordinary model formula, that is, as \(\delta = \mu\).
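A hedged sketch of how such a model might be specified, assuming a hypothetical data frame d with reaction times rt (in seconds) and a 0/1 column resp indicating which boundary was reached; the dec() addition term used below reflects the usual brms interface but should be checked against help(brmsformula):
fit_wiener <- brm(rt | dec(resp) ~ x, data = d, family = wiener())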

Quantile regression

Quantile regression is implemented via family asym_laplace (asymmetric Laplace distribution) with density

\[ f(y) = \frac{p (1 - p)}{\sigma} \exp\left(-\rho_p\left(\frac{y - \mu}{\sigma}\right)\right) \] where \(\rho_p\) is given by \(\rho_p(x) = x (p - I_{x < 0})\) and \(I_A\) is the indicator function of set \(A\). The parameter \(\sigma\) is a positive scale parameter and \(p\) is the quantile parameter taking on values in \((0, 1)\). For this distribution, we have \(P(Y < g(\eta)) = p\). Thus, quantile regression can be performed by fixing \(p\) to the quantile of interest.
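For example, a 20%-quantile regression could be sketched as follows (hypothetical data frame d; this assumes that the quantile distributional parameter can be fixed to a constant inside bf() in the same way as other distributional parameters):
fit_q20 <- brm(bf(y ~ x, quantile = 0.2), data = d, family = asym_laplace())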

Probability models

The density of the Beta family for \(y \in (0,1)\) is given by \[ f(y) = \frac{y^{\mu \phi - 1} (1-y)^{(1-\mu) \phi-1}}{B(\mu \phi, (1-\mu) \phi)} \] where \(B\) is the beta function and \(\phi\) is a positive precision parameter. A multivariate generalization of the Beta family is the dirichlet family with density \[ f(y) = \frac{1}{B((\mu_{1}, \ldots, \mu_{K}) \phi)} \prod_{k=1}^K y_{k}^{\mu_{k} \phi - 1}. \] The dirichlet distribution is only implemented with the multivariate logit link function so that \[ \mu_{j} = \frac{\exp(\eta_{j})}{\sum_{k = 1}^{K} \exp(\eta_{k})} \] For reasons of identifiability, \(\eta_{1}\) is set to \(0\).

Circular models

The density of the von_mises family for \(y \in (-\pi,\pi)\) is given by \[ f(y) = \frac{\exp(\kappa \cos(y - \mu))}{2\pi I_0(\kappa)} \] where \(I_0\) is the modified Bessel function of order 0 and \(\kappa\) is a positive precision parameter.

Ordinal and categorical models

For ordinal and categorical models, \(y\) is one of the categories \(1, ..., K\). The intercepts of ordinal models are called thresholds and are denoted as \(\tau_k\), with \(k \in \{1, ..., K-1\}\), whereas \(\eta\) does not contain a fixed effects intercept. Note that the applied link functions \(h\) are technically distribution functions \(\mathbb{R} \rightarrow [0,1]\). The density of the cumulative family (implementing the most basic ordinal model) is given by \[ f(y) = g(\tau_{y + 1} - \eta) - g(\tau_{y} - \eta) \]

The densities of the sratio (stopping ratio) and cratio (continuation ratio) families are given by \[ f(y) = g(\tau_{y + 1} - \eta) \prod_{k = 1}^{y} (1 - g(\tau_{k} - \eta)) \] and \[ f(y) = (1 - g(\eta - \tau_{y + 1})) \prod_{k = 1}^{y} g(\eta - \tau_{k}) \]

respectively. Note that both families are equivalent for symmetric link functions such as logit or probit. The density of the acat (adjacent category) family is given by \[ f(y) = \frac{\prod_{k=1}^{y} g(\eta - \tau_{k}) \prod_{k=y+1}^K(1-g(\eta - \tau_{k}))}{\sum_{k=0}^K\prod_{j=1}^k g(\eta-\tau_{j}) \prod_{j=k+1}^K(1-g(\eta - \tau_{j}))} \] For the logit link, this can be simplified to \[ f(y) = \frac{\exp \left(\sum_{k=1}^{y} (\eta - \tau_{k}) \right)} {\sum_{k=0}^K \exp\left(\sum_{j=1}^k (\eta - \tau_{j}) \right)} \] The linear predictor \(\eta\) can be generalized to also depend on the category \(k\) for a subset of predictors. This leads to category specific effects (for details on how to specify them see help(brm)). Note that cumulative and sratio models use \(\tau - \eta\), whereas cratio and acat use \(\eta - \tau\). This is done to ensure that larger values of \(\eta\) increase the probability of higher response categories.
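A brief sketch of fitting such models, assuming a hypothetical data frame d with an ordinal response rating and a predictor x; cs() marks the category specific effects mentioned above (typically used with the acat, cratio, or sratio families):
fit_cum <- brm(rating ~ x, data = d, family = cumulative("probit"))
fit_acat <- brm(rating ~ cs(x), data = d, family = acat("logit"))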

The categorical family is currently only implemented with the multivariate logit link function and has density \[ f(y) = \mu_{y} = \frac{\exp(\eta_{y})}{\sum_{k = 1}^{K} \exp(\eta_{k})} \] Note that \(\eta\) does also depend on the category \(k\). For reasons of identifiability, \(\eta_{1}\) is set to \(0\). A generalization of the categorical family to more than one trial is the multinomial family with density \[ f(y) = {N \choose y_{1}, y_{2}, \ldots, y_{K}} \prod_{k=1}^K \mu_{k}^{y_{k}} \] where, for each category, \(\mu_{k}\) is estimated via the multivariate logit link function shown above.

Zero-inflated and hurdle models

Zero-inflated and hurdle families extend existing families by adding special processes for responses that are zero. The density of a zero-inflated family is given by \[ f_z(y) = z + (1 - z) f(0) \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) \quad \text{if } y > 0 \] where \(z\) denotes the zero-inflation probability. Currently implemented families are zero_inflated_poisson, zero_inflated_binomial, zero_inflated_negbinomial, and zero_inflated_beta.

The density of a hurdle family is given by \[ f_z(y) = z \quad \text{if } y = 0 \\ f_z(y) = (1 - z) f(y) / (1 - f(0)) \quad \text{if } y > 0 \] Currently implemented families are hurdle_poisson, hurdle_negbinomial, hurdle_gamma, and hurdle_lognormal.

The density of a zero-one-inflated family is given by \[ f_{\alpha, \gamma}(y) = \alpha (1 - \gamma) \quad \text{if } y = 0 \\ f_{\alpha, \gamma}(y) = \alpha \gamma \quad \text{if } y = 1 \\ f_{\alpha, \gamma}(y) = (1 - \alpha) f(y) \quad \text{if } y \notin \{0, 1\} \] where \(\alpha\) is the zero-one-inflation probability (i.e. the probability that zero or one occurs) and \(\gamma\) is the conditional one-inflation probability (i.e. the probability that one occurs rather than zero). Currently implemented families are zero_one_inflated_beta.

brms/inst/doc/brms_distreg.R0000644000175000017500000000613314146736036015715 0ustar nileshnileshparams <- list(EVAL = TRUE) ## ---- SETTINGS-knitr, include=FALSE----------------------------------------------------- stopifnot(require(knitr)) options(width = 90) opts_chunk$set( comment = NA, message = FALSE, warning = FALSE, eval = if (isTRUE(exists("params"))) params$EVAL else FALSE, dev = "png", dpi = 150, fig.asp = 0.8, fig.width = 5, out.width = "60%", fig.align = "center" ) library(brms) ggplot2::theme_set(theme_default()) ## --------------------------------------------------------------------------------------- group <- rep(c("treat", "placebo"), each = 30) symptom_post <- c(rnorm(30, mean = 1, sd = 2), rnorm(30, mean = 0, sd = 1)) dat1 <- data.frame(group, symptom_post) head(dat1) ## ---- results='hide'-------------------------------------------------------------------- fit1 <- brm(bf(symptom_post ~ group, sigma ~ group), data = dat1, family = gaussian()) ## ---- results='hide'-------------------------------------------------------------------- summary(fit1) plot(fit1, N = 2, ask = FALSE) plot(conditional_effects(fit1), points = TRUE) ## --------------------------------------------------------------------------------------- hyp <- c("exp(sigma_Intercept) = 0", "exp(sigma_Intercept + sigma_grouptreat) = 0") hypothesis(fit1, hyp) ## --------------------------------------------------------------------------------------- hyp <- "exp(sigma_Intercept + sigma_grouptreat) > exp(sigma_Intercept)" (hyp <- hypothesis(fit1, hyp)) plot(hyp, chars = NULL) ## --------------------------------------------------------------------------------------- zinb <- read.csv("https://paul-buerkner.github.io/data/fish.csv") head(zinb) ## ---- results='hide'-------------------------------------------------------------------- fit_zinb1 <- brm(count ~ persons + child + camper, data = zinb, family = zero_inflated_poisson()) ## --------------------------------------------------------------------------------------- summary(fit_zinb1) plot(conditional_effects(fit_zinb1), ask = FALSE) ## ---- results='hide'-------------------------------------------------------------------- fit_zinb2 <- brm(bf(count ~ persons + child + camper, zi ~ child), data = zinb, family = zero_inflated_poisson()) ## --------------------------------------------------------------------------------------- summary(fit_zinb2) plot(conditional_effects(fit_zinb2), ask = FALSE) ## --------------------------------------------------------------------------------------- dat_smooth <- mgcv::gamSim(eg = 6, n = 200, scale = 2, verbose = FALSE) head(dat_smooth[, 1:6]) ## ---- results='hide'-------------------------------------------------------------------- fit_smooth1 <- brm( bf(y ~ s(x1) + s(x2) + (1|fac), sigma ~ s(x0) + (1|fac)), data = dat_smooth, family = gaussian(), chains = 2, control = list(adapt_delta = 0.95) ) ## --------------------------------------------------------------------------------------- summary(fit_smooth1) plot(conditional_effects(fit_smooth1), points = TRUE, ask = FALSE) brms/inst/doc/brms_phylogenetics.html0000644000175000017500000062003614146744715017703 0ustar nileshnilesh Estimating Phylogenetic Multilevel Models with brms

Estimating Phylogenetic Multilevel Models with brms

Paul Bürkner

2021-11-22

Introduction

In the present vignette, we want to discuss how to specify phylogenetic multilevel models using brms. These models are relevant in evolutionary biology when data of many species are analyzed at the same time. The usual approach would be to model species as a grouping factor in a multilevel model and estimate varying intercepts (and possibly also varying slopes) over species. However, species are not independent as they come from the same phylogenetic tree and we thus have to adjust our model to incorporate this dependency. The examples discussed here are from chapter 11 of the book Modern Phylogenetic Comparative Methods and Their Application in Evolutionary Biology (de Villemeruil & Nakagawa, 2014). The necessary data can be downloaded from the corresponding website (http://www.mpcm-evolution.com/). Some of these models may take a few minutes to fit.

A Simple Phylogenetic Model

Assume we have measurements of a phenotype, phen (say the body size), and a cofactor variable (say the temperature of the environment). We prepare the data using the following code.

phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex")
data_simple <- read.table(
  "https://paul-buerkner.github.io/data/data_simple.txt", 
  header = TRUE
)
head(data_simple)
       phen  cofactor phylo
1 107.06595 10.309588  sp_1
2  79.61086  9.690507  sp_2
3 116.38186 15.007825  sp_3
4 143.28705 19.087673  sp_4
5 139.60993 15.658404  sp_5
6  68.50657  6.005236  sp_6

The phylo object contains information on the relationship between species. Using this information, we can construct a covariance matrix of species (Hadfield & Nakagawa, 2010).

A <- ape::vcv.phylo(phylo)

Now we are ready to fit our first phylogenetic multilevel model:

model_simple <- brm(
  phen ~ cofactor + (1|gr(phylo, cov = A)), 
  data = data_simple, 
  family = gaussian(), 
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "b"),
    prior(normal(0, 50), "Intercept"),
    prior(student_t(3, 0, 20), "sd"),
    prior(student_t(3, 0, 20), "sigma")
  )
)

With the exception of (1|gr(phylo, cov = A)) instead of (1|phylo), this is a basic multilevel model with a varying intercept over species (phylo is an indicator of species in this data set). However, by using cov = A in the gr function, we make sure that species are correlated as specified by the covariance matrix A. We pass A itself via the data2 argument, which can be used for any kind of data that does not fit into the regular structure of the data argument. Setting priors is not required for achieving good convergence for this model, but it improves sampling speed a bit. After fitting, the results can be investigated in detail.

summary(model_simple)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ cofactor + (1 | gr(phylo, cov = A)) 
   Data: data_simple (Number of observations: 200) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    14.44      2.15    10.30    18.74 1.01      836     1500

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    38.36      7.22    24.12    52.57 1.00     1924     2149
cofactor      5.17      0.14     4.90     5.45 1.00     6221     3394

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     9.24      0.73     7.85    10.72 1.00     1129     2173

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(model_simple, N = 2, ask = FALSE)

plot(conditional_effects(model_simple), points = TRUE) 

The so-called phylogenetic signal (often symbolized by \(\lambda\)) can be computed with the hypothesis method and is roughly \(\lambda = 0.7\) for this example.

hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0"
(hyp <- hypothesis(model_simple, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0      0.7      0.09     0.51     0.84         NA        NA    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp)

Note that the phylogenetic signal is just a synonym of the intra-class correlation (ICC) used in the context of phylogenetic analysis.
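An equivalent way to obtain it is to compute the ICC directly from the posterior draws; a small sketch assuming the as_draws_df method and the parameter names shown in the hypothesis call above:
draws <- as_draws_df(model_simple)
lambda <- draws$sd_phylo__Intercept^2 /
  (draws$sd_phylo__Intercept^2 + draws$sigma^2)
quantile(lambda, probs = c(0.025, 0.5, 0.975))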

A Phylogenetic Model with Repeated Measurements

Often, we have multiple observations per species and this allows to fit more complicated phylogenetic models.

data_repeat <- read.table(
  "https://paul-buerkner.github.io/data/data_repeat.txt", 
  header = TRUE
)
data_repeat$spec_mean_cf <- 
  with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo])
head(data_repeat)
       phen  cofactor species phylo spec_mean_cf
1 107.41919 11.223724    sp_1  sp_1    10.309588
2 109.16403  9.805934    sp_1  sp_1    10.309588
3  91.88672 10.308423    sp_1  sp_1    10.309588
4 121.54341  8.355349    sp_1  sp_1    10.309588
5 105.31638 11.854510    sp_1  sp_1    10.309588
6  64.99859  4.314015    sp_2  sp_2     3.673914

The variable spec_mean_cf just contains the mean of the cofactor for each species. The code for the repeated measurement phylogenetic model looks as follows:

model_repeat1 <- brm(
  phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species), 
  data = data_repeat, 
  family = gaussian(), 
  data2 = list(A = A),
  prior = c(
    prior(normal(0,10), "b"),
    prior(normal(0,50), "Intercept"),
    prior(student_t(3,0,20), "sd"),
    prior(student_t(3,0,20), "sigma")
  ),
  sample_prior = TRUE, chains = 2, cores = 2, 
  iter = 4000, warmup = 1000
)

The variables phylo and species are identical as they are both identifiers of the species. However, we model the phylogenetic covariance only for phylo and thus the species variable accounts for any specific effect that would be independent of the phylogenetic relationship between species (e.g., environmental or niche effects). Again we can obtain model summaries as well as estimates of the phylogenetic signal.

summary(model_repeat1)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ spec_mean_cf + (1 | gr(phylo, cov = A)) + (1 | species) 
   Data: data_repeat (Number of observations: 1000) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    16.39      1.92    12.79    20.32 1.00     1389     1848

~species (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     4.99      0.84     3.31     6.56 1.00     1046     1442

Population-Level Effects: 
             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept       36.18      7.80    20.95    51.26 1.00     4161     3464
spec_mean_cf     5.10      0.10     4.90     5.30 1.00     8003     4395

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     8.11      0.20     7.73     8.51 1.00     5286     4336

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
hyp <- paste(
  "sd_phylo__Intercept^2 /", 
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat1, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0     0.74      0.06     0.61     0.84          0         0    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(hyp)

So far, we have completely ignored the variability of the cofactor within species. To incorporate this into the model, we define

data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf

and then fit it again using within_spec_cf as an additional predictor.

model_repeat2 <- update(
  model_repeat1, formula = ~ . + within_spec_cf,
  newdata = data_repeat, chains = 2, cores = 2, 
  iter = 4000, warmup = 1000
)

The results are almost unchanged, with apparently no relationship between the phenotype and the within species variance of cofactor.

summary(model_repeat2)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen ~ spec_mean_cf + (1 | gr(phylo, cov = A)) + (1 | species) + within_spec_cf 
   Data: data_repeat (Number of observations: 1000) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)    16.37      1.90    12.88    20.30 1.00     1675     2450

~species (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     5.01      0.84     3.30     6.57 1.00     1179     1617

Population-Level Effects: 
               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept         36.19      7.69    21.16    50.86 1.00     4249     3995
spec_mean_cf       5.10      0.11     4.88     5.30 1.00     8382     3830
within_spec_cf    -0.06      0.18    -0.43     0.30 1.00     9757     4098

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     8.11      0.21     7.73     8.53 1.00     5397     4137

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Also, the phylogenetic signal remains more or less the same.

hyp <- paste(
  "sd_phylo__Intercept^2 /", 
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat2, hyp, class = NULL))
Hypothesis Tests for class :
                Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
1 (sd_phylo__Interc... = 0     0.74      0.06     0.62     0.84          0         0    *
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

A Phylogenetic Meta-Analysis

Let’s say we have Fisher’s z-transformed correlation coefficients \(Zr\) per species along with corresponding sample sizes (e.g., correlations between male coloration and reproductive success):

data_fisher <- read.table(
  "https://paul-buerkner.github.io/data/data_effect.txt", 
  header = TRUE
)
data_fisher$obs <- 1:nrow(data_fisher)
head(data_fisher)
          Zr  N phylo obs
1 0.28917549 13  sp_1   1
2 0.02415579 40  sp_2   2
3 0.19513651 39  sp_3   3
4 0.09831239 40  sp_4   4
5 0.13780152 66  sp_5   5
6 0.13710587 41  sp_6   6

We assume the sampling variance to be known and equal to \(V(Zr) = \frac{1}{N - 3}\) for Fisher’s values, where \(N\) is the sample size per species. Incorporating the known sampling variance into the model is straightforward. One has to keep in mind, though, that brms requires the sampling standard deviation (square root of the variance) as input instead of the variance itself. The group-level effect of obs represents the residual variance, which we have to model explicitly in a meta-analytic model.

model_fisher <- brm(
  Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs), 
  data = data_fisher, family = gaussian(), 
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "Intercept"),
    prior(student_t(3, 0, 10), "sd")
  ),
  control = list(adapt_delta = 0.95),
  chains = 2, cores = 2, iter = 4000, warmup = 1000
)

A summary of the fitted model is obtained via

summary(model_fisher)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: Zr | se(sqrt(1/(N - 3))) ~ 1 + (1 | gr(phylo, cov = A)) + (1 | obs) 
   Data: data_fisher (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 1000; thin = 1;
         total post-warmup draws = 6000

Group-Level Effects: 
~obs (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.05      0.03     0.00     0.10 1.00      757     1399

~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.07      0.04     0.00     0.15 1.00      724     1482

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     0.16      0.04     0.08     0.23 1.00     3015     2716

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.00      0.00     0.00     0.00   NA       NA       NA

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(model_fisher)

The meta-analytic mean (i.e., the model intercept) is \(0.16\) with a credible interval of \([0.08, 0.23]\). Thus the mean correlation across species is positive according to the model.
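If a value on the original correlation scale is desired, the intercept estimate can be mapped back via the inverse of Fisher’s z-transformation; a small sketch using fixef():
tanh(fixef(model_fisher)["Intercept", "Estimate"])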

A phylogenetic count-data model

Suppose that we analyze a phenotype that consists of counts instead of being a continuous variable. In such a case, the normality assumption will likely not be justified and it is recommended to use a distribution explicitly suited for count data, for instance the Poisson distribution. The following data set (again retrieved from mpcm-evolution.org) provides an example.

data_pois <- read.table(
  "https://paul-buerkner.github.io/data/data_pois.txt", 
  header = TRUE
)
data_pois$obs <- 1:nrow(data_pois)
head(data_pois)
  phen_pois   cofactor phylo obs
1         1  7.8702830  sp_1   1
2         0  3.4690529  sp_2   2
3         1  2.5478774  sp_3   3
4        14 18.2286628  sp_4   4
5         1  2.5302806  sp_5   5
6         1  0.5145559  sp_6   6

As the Poisson distribution does not have a natural overdispersion parameter, we model the residual variance via the group-level effects of obs (e.g., see Lawless, 1987).

model_pois <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs), 
  data = data_pois, family = poisson("log"), 
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)

Again, we obtain a summary of the fitted model via

summary(model_pois)
 Family: poisson 
  Links: mu = log 
Formula: phen_pois ~ cofactor + (1 | gr(phylo, cov = A)) + (1 | obs) 
   Data: data_pois (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~obs (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.18      0.09     0.02     0.34 1.00      687      886

~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.18      0.10     0.03     0.41 1.00     1072     1418

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -2.09      0.20    -2.50    -1.68 1.00     4260     2762
cofactor      0.25      0.01     0.23     0.27 1.00     5743     2852

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
plot(conditional_effects(model_pois), points = TRUE) 

Now, assume we ignore the fact that the phenotype is count data and fit a linear normal model instead.

model_normal <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)), 
  data = data_pois, family = gaussian(), 
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)
summary(model_normal)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: phen_pois ~ cofactor + (1 | gr(phylo, cov = A)) 
   Data: data_pois (Number of observations: 200) 
  Draws: 2 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~phylo (Number of levels: 200) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.70      0.53     0.03     1.98 1.00      889     1415

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -3.09      0.65    -4.37    -1.80 1.00     3011     1836
cofactor      0.68      0.04     0.60     0.77 1.00     8183     2819

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.44      0.18     3.08     3.81 1.00     5132     2648

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

We see that cofactor has a positive relationship with the phenotype in both models. One should keep in mind, though, that the estimates of the Poisson model are on the log-scale, as we applied the canonical log-link function in this example. Therefore, estimates are not comparable to a linear normal model even if applied to the same data. What we can compare, however, is the model fit, for instance graphically via posterior predictive checks.

pp_check(model_pois)

pp_check(model_normal)

Apparently, the distribution of the phenotype predicted by the Poisson model resembles the original distribution of the phenotype pretty closely, while the normal model fails to do so. We can also apply leave-one-out cross-validation for direct numerical comparison of model fit.

loo(model_pois, model_normal)
Output of model 'model_pois':

Computed from 4000 by 200 log-likelihood matrix

         Estimate   SE
elpd_loo   -348.2 17.0
p_loo        30.0  3.4
looic       696.5 34.0
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     170   85.0%   694       
 (0.5, 0.7]   (ok)        26   13.0%   143       
   (0.7, 1]   (bad)        4    2.0%   352       
   (1, Inf)   (very bad)   0    0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Output of model 'model_normal':

Computed from 4000 by 200 log-likelihood matrix

         Estimate   SE
elpd_loo   -536.1 15.9
p_loo        10.5  2.3
looic      1072.3 31.7
------
Monte Carlo SE of elpd_loo is 0.1.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     194   97.0%   488       
 (0.5, 0.7]   (ok)         6    3.0%   2289      
   (0.7, 1]   (bad)        0    0.0%   <NA>      
   (1, Inf)   (very bad)   0    0.0%   <NA>      

All Pareto k estimates are ok (k < 0.7).
See help('pareto-k-diagnostic') for details.

Model comparisons:
             elpd_diff se_diff
model_pois      0.0       0.0 
model_normal -187.9      18.0 

Since higher elpd_loo values (equivalently, smaller looic values) indicate better fit, it is again evident that the Poisson model fits the data better than the normal model. Of course, the Poisson model is not the only reasonable option here. For instance, you could use a negative binomial model (via family negbinomial), which already contains an overdispersion parameter so that modeling a varying intercept of obs becomes obsolete.
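A sketch of this alternative, reusing the covariance structure from above but dropping the obs term:
model_nb <- brm(
  phen_pois ~ cofactor + (1 | gr(phylo, cov = A)), 
  data = data_pois, family = negbinomial(), 
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)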

Phylogenetic models with multiple group-level effects

In the above examples, we have only used a single group-level effect (i.e., a varying intercept) for the phylogenetic grouping factors. In brms, it is also possible to estimate multiple group-level effects (e.g., a varying intercept and a varying slope) for these grouping factors. However, it requires repeatedly computing Kronecker products of covariance matrices while fitting the model. This will be very slow especially when the grouping factors have many levels and matrices are thus large.
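For completeness, a sketch of what a varying intercept and a varying slope of cofactor over the phylogeny would look like (expect long run times for large trees):
model_slopes <- brm(
  phen ~ cofactor + (1 + cofactor | gr(phylo, cov = A)), 
  data = data_simple, family = gaussian(), 
  data2 = list(A = A)
)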

References

de Villemeruil P. & Nakagawa, S. (2014) General quantitative genetic methods for comparative biology. In: Modern phylogenetic comparative methods and their application in evolutionary biology: concepts and practice (ed. Garamszegi L.) Springer, New York. pp. 287-303.

Hadfield, J. D. & Nakagawa, S. (2010) General quantitative genetic methods for comparative biology: phylogenies, taxonomies, and multi-trait models for continuous and categorical characters. Journal of Evolutionary Biology. 23. 494-508.

Lawless, J. F. (1987). Negative binomial and mixed Poisson regression. Canadian Journal of Statistics, 15(3), 209-225.

brms/inst/doc/brms_customfamilies.html0000644000175000017500000016405514146735327020055 0ustar nileshnilesh Define Custom Response Distributions with brms

Define Custom Response Distributions with brms

Paul Bürkner

2021-11-22

Introduction

The brms package comes with a lot of built-in response distributions – usually called families in R – to specify among others linear, count data, survival, response times, or ordinal models (see help(brmsfamily) for an overview). Despite supporting over two dozen families, there is still a long list of distributions, which are not natively supported. The present vignette will explain how to specify such custom families in brms. By doing that, users can benefit from the modeling flexibility and post-processing options of brms even when using self-defined response distributions. If you have built a custom family that you want to make available to other users, you can submit a pull request to this GitHub repository.

A Case Study

As a case study, we will use the cbpp data of the lme4 package, which describes the development of contagious bovine pleuropneumonia (CBPP) in cattle in Africa. The data set contains four variables: period (the time period), herd (a factor identifying the cattle herd), incidence (the number of new disease cases for a given herd and time period), and size (the herd size at the beginning of a given time period).

data("cbpp", package = "lme4")
head(cbpp)
  herd incidence size period
1    1         2   14      1
2    1         3   12      2
3    1         4    9      3
4    1         0    5      4
5    2         3   22      1
6    2         1   18      2

In a first step, we will be predicting incidence using a simple binomial model, which will serve as our baseline model. For observed number of events \(y\) (incidence in our case) and total number of trials \(T\) (size), the probability mass function of the binomial distribution is defined as

\[ P(y | T, p) = \binom{T}{y} p^{y} (1 - p)^{T-y} \]

where \(p\) is the event probability. In the classical binomial model, we will directly predict \(p\) on the logit-scale, which means that for each observation \(i\) we compute the success probability \(p_i\) as

\[ p_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} \]

where \(\eta_i\) is the linear predictor term of observation \(i\) (see vignette("brms_overview") for more details on linear predictors in brms). Predicting incidence by period and a varying intercept of herd is straightforward in brms:

fit1 <- brm(incidence | trials(size) ~ period + (1|herd), 
            data = cbpp, family = binomial())

In the summary output, we see that the incidence probability varies substantially over herds, but decreases over the course of time, as indicated by the negative coefficients of period.

summary(fit1)
 Family: binomial 
  Links: mu = logit 
Formula: incidence | trials(size) ~ period + (1 | herd) 
   Data: cbpp (Number of observations: 56) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~herd (Number of levels: 15) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.76      0.23     0.39     1.31 1.00     1445     2015

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.41      0.26    -1.96    -0.91 1.00     2105     2262
period2      -1.00      0.31    -1.62    -0.40 1.00     4299     3017
period3      -1.15      0.34    -1.84    -0.49 1.00     4247     3127
period4      -1.62      0.42    -2.48    -0.85 1.00     4449     2833

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

A drawback of the binomial model is that – after taking the linear predictor into account – its variance is fixed to \(\text{Var}(y_i) = T_i p_i (1 - p_i)\). All variance exceeding this value cannot be taken into account by the model. There are multiple ways of dealing with this so-called overdispersion, and the solution described below will serve as an illustrative example of how to define custom families in brms.
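One common alternative, shown here only as a sketch and not pursued further in this vignette, is an observation-level varying intercept that absorbs the extra-binomial variation:

# Sketch only: an observation-level random intercept as a generic
# way to model overdispersion in the binomial model.
cbpp$obs <- seq_len(nrow(cbpp))
fit_olre <- brm(
  incidence | trials(size) ~ period + (1 | herd) + (1 | obs),
  data = cbpp, family = binomial()
)

Below, we instead handle the overdispersion directly in the response distribution.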

The Beta-Binomial Distribution

The beta-binomial model is a generalization of the binomial model with an additional parameter to account for overdispersion. In the beta-binomial model, we do not predict the binomial probability \(p_i\) directly, but assume it to be beta distributed with hyperparameters \(\alpha > 0\) and \(\beta > 0\):

\[ p_i \sim \text{Beta}(\alpha_i, \beta_i) \]

The \(\alpha\) and \(\beta\) parameters are both hard to interpret and generally not recommended for use in regression models. Thus, we will apply a different parameterization with parameters \(\mu \in [0, 1]\) and \(\phi > 0\), which we will call \(\text{Beta2}\):

\[ \text{Beta2}(\mu, \phi) = \text{Beta}(\mu \phi, (1-\mu) \phi) \]

The parameters \(\mu\) and \(\phi\) specify the mean and precision parameter, respectively. By defining

\[ \mu_i = \frac{\exp(\eta_i)}{1 + \exp(\eta_i)} \]

we still predict the expected probability by means of our transformed linear predictor (as in the original binomial model), but account for potential overdispersion via the parameter \(\phi\).
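To make this reparameterization concrete, here is a small R sketch (the helper names dbeta2 and rbeta2 are ours and not part of brms):

# Mean-precision ("Beta2") parameterization of the beta distribution:
# shape1 = mu * phi, shape2 = (1 - mu) * phi
dbeta2 <- function(x, mu, phi, log = FALSE) {
  dbeta(x, shape1 = mu * phi, shape2 = (1 - mu) * phi, log = log)
}
rbeta2 <- function(n, mu, phi) {
  rbeta(n, shape1 = mu * phi, shape2 = (1 - mu) * phi)
}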

Fitting Custom Family Models

The beta-binomial distribution is not natively supported in brms and so we will have to define it ourselves using the custom_family function. This function requires the family’s name, the names of its parameters (mu and phi in our case), corresponding link functions (only applied if parameters are predicted), their theoretical lower and upper bounds (only applied if parameters are not predicted), information on whether the distribution is discrete or continuous, and finally, whether additional non-parameter variables need to be passed to the distribution. For our beta-binomial example, this results in the following custom family:

beta_binomial2 <- custom_family(
  "beta_binomial2", dpars = c("mu", "phi"),
  links = c("logit", "log"), lb = c(NA, 0),
  type = "int", vars = "vint1[n]"
)

The name vint1 for the variable containing the number of trials is not chosen arbitrarily, as we will see below. Next, we have to provide the relevant Stan functions if the distribution is not defined in Stan itself. For the beta_binomial2 distribution, this is straightforward since the ordinary beta_binomial distribution is already implemented in Stan.

stan_funs <- "
  real beta_binomial2_lpmf(int y, real mu, real phi, int T) {
    return beta_binomial_lpmf(y | T, mu * phi, (1 - mu) * phi);
  }
  int beta_binomial2_rng(real mu, real phi, int T) {
    return beta_binomial_rng(T, mu * phi, (1 - mu) * phi);
  }
"

For the model fitting, we will only need beta_binomial2_lpmf, but beta_binomial2_rng will come in handy when it comes to post-processing. We define:

stanvars <- stanvar(scode = stan_funs, block = "functions")

To provide information about the number of trials (an integer variable), we are going to use the addition argument vint(), which can only be used in custom families. Similarly, if we needed to include additional vectors of real data, we would use vreal(). Actually, for this particular example, we could more elegantly apply the addition argument trials() instead of vint(), as in the basic binomial model. However, since the present vignette is meant to give a general overview of the topic, we will go with the more general method.
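Purely for illustration (with hypothetical variable names; not used below), combining both addition terms in a formula, together with the matching vars specification in custom_family, might look like this:

# Hypothetical example, not used in this vignette:
# bf(y | vint(n_trials) + vreal(exposure) ~ x)
# with the custom family defined via vars = c("vint1[n]", "vreal1[n]")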

We now have all components together to fit our custom beta-binomial model:

fit2 <- brm(
  incidence | vint(size) ~ period + (1|herd), data = cbpp, 
  family = beta_binomial2, stanvars = stanvars
)

The summary output reveals that the uncertainty in the coefficients of period is somewhat larger than in the basic binomial model, which is the result of including the overdispersion parameter phi in the model. Apart from that, the results look pretty similar.

summary(fit2)
 Family: beta_binomial2 
  Links: mu = logit; phi = identity 
Formula: incidence | vint(size) ~ period + (1 | herd) 
   Data: cbpp (Number of observations: 56) 
  Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup draws = 4000

Group-Level Effects: 
~herd (Number of levels: 15) 
              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)     0.38      0.26     0.02     0.95 1.00     1217     1803

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    -1.33      0.25    -1.85    -0.87 1.00     4229     2921
period2      -1.02      0.41    -1.84    -0.24 1.00     4213     2908
period3      -1.27      0.45    -2.21    -0.41 1.00     4187     2754
period4      -1.55      0.53    -2.67    -0.60 1.00     4286     2988

Family Specific Parameters: 
    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
phi    17.58     20.67     5.50    58.12 1.00     1750     1189

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Post-Processing Custom Family Models

Some post-processing methods such as summary or plot work out of the box for custom family models. However, there are three particularly important methods that require additional input by the user. These are posterior_epred, posterior_predict, and log_lik, which compute predicted mean values, predicted response values, and log-likelihood values, respectively. They are not only relevant in their own right, but also provide the basis of many other post-processing methods. For instance, we may be interested in comparing the fit of the binomial model with that of the beta-binomial model by means of approximate leave-one-out cross-validation implemented in method loo, which in turn requires log_lik to be working.

The log_lik function of a family should be named log_lik_<family-name> and have the two arguments i (indicating the observation) and prep. You don’t have to worry too much about how prep is created (if you are interested, check out the prepare_predictions function). Instead, all you need to know is that parameters are stored in slot dpars and data are stored in slot data. Generally, parameters take on the form of an \(S \times N\) matrix (with \(S =\) number of posterior draws and \(N =\) number of observations) if they are predicted (as is mu in our example) and a vector of size \(S\) if they are not predicted (as is phi).

We could define the complete log-likelihood function in R directly, or we can expose the self-defined Stan functions and apply them. The latter approach is usually more convenient, but the former is more stable and the only option when implementing custom families in other R packages building upon brms. For the purpose of the present vignette, we will go with the latter approach.

expose_functions(fit2, vectorize = TRUE)

and define the required log_lik functions with a few lines of code.

log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  beta_binomial2_lpmf(y, mu, phi, trials)
}

The get_dpar function will do the necessary transformations to handle both the case when the distributional parameters are predicted separately for each row and when they are the same for the whole fit.
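For completeness, the first approach mentioned above, defining the log-likelihood in plain R without exposing Stan code, might look roughly as follows (the helper name dbeta_binomial2 is ours):

# Beta-binomial pmf in the mean-precision parameterization, written in base R.
dbeta_binomial2 <- function(y, mu, phi, size, log = FALSE) {
  a <- mu * phi
  b <- (1 - mu) * phi
  out <- lchoose(size, y) + lbeta(y + a, size - y + b) - lbeta(a, b)
  if (log) out else exp(out)
}
# Alternative definition of the log_lik function using only R code.
log_lik_beta_binomial2 <- function(i, prep) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  y <- prep$data$Y[i]
  dbeta_binomial2(y, mu, phi, trials, log = TRUE)
}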

With that being done, all of the post-processing methods requiring log_lik will work as well. For instance, model comparison can simply be performed via

loo(fit1, fit2)
Output of model 'fit1':

Computed from 4000 by 56 log-likelihood matrix

         Estimate   SE
elpd_loo   -100.1 10.2
p_loo        22.4  4.3
looic       200.3 20.4
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     40    71.4%   915       
 (0.5, 0.7]   (ok)       11    19.6%   197       
   (0.7, 1]   (bad)       5     8.9%   62        
   (1, Inf)   (very bad)  0     0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Output of model 'fit2':

Computed from 4000 by 56 log-likelihood matrix

         Estimate   SE
elpd_loo    -95.1  8.3
p_loo        10.8  2.1
looic       190.1 16.7
------
Monte Carlo SE of elpd_loo is NA.

Pareto k diagnostic values:
                         Count Pct.    Min. n_eff
(-Inf, 0.5]   (good)     46    82.1%   1119      
 (0.5, 0.7]   (ok)        7    12.5%   785       
   (0.7, 1]   (bad)       3     5.4%   54        
   (1, Inf)   (very bad)  0     0.0%   <NA>      
See help('pareto-k-diagnostic') for details.

Model comparisons:
     elpd_diff se_diff
fit2  0.0       0.0   
fit1 -5.1       4.3   

Since larger ELPD values indicate better fit, we see that the beta-binomial model fits somewhat better, although the corresponding standard error reveals that the difference is not that substantial.

Next, we will define the function necessary for the posterior_predict method:

posterior_predict_beta_binomial2 <- function(i, prep, ...) {
  mu <- brms::get_dpar(prep, "mu", i = i)
  phi <- brms::get_dpar(prep, "phi", i = i)
  trials <- prep$data$vint1[i]
  beta_binomial2_rng(mu, phi, trials)
}

The posterior_predict function looks pretty similar to the corresponding log_lik function, except that we are now creating random draws of the response instead of log-likelihood values. Again, we are using an exposed Stan function for convenience. Make sure to add a ... argument to your posterior_predict function even if you are not using it, since some families require additional arguments. With posterior_predict working, we can, for instance, engage in posterior-predictive checking:

pp_check(fit2)

When defining the posterior_epred function, you have to keep in mind that it has only a prep argument and should compute the mean response values for all observations at once. Since the mean of the beta-binomial distribution is \(\text{E}(y) = \mu T\), the definition of the corresponding posterior_epred function is not too complicated, but we need to get the dimensions of the parameters and the data in line.

posterior_epred_beta_binomial2 <- function(prep) {
  mu <- brms::get_dpar(prep, "mu")
  trials <- prep$data$vint1
  trials <- matrix(trials, nrow = nrow(mu), ncol = ncol(mu), byrow = TRUE)
  mu * trials
}

A post-processing method relying directly on posterior_epred is conditional_effects, which allows us to visualize the effects of predictors.

conditional_effects(fit2, conditions = data.frame(size = 1))

For ease of interpretation we have set size to 1 so that the y-axis of the above plot indicates probabilities.

Turning a Custom Family into a Native Family

Family functions built natively into brms are safer to use and more convenient, as they require much less user input. If you think that your custom family is general enough to be useful to other users, please feel free to open an issue on GitHub so that we can discuss all the details. Provided that we agree it makes sense to implement your family natively in brms, the following steps are required (foo is a placeholder for the family name):

  • In family-lists.R, add function .family_foo which should contain basic information about your family (you will find lots of examples for other families there).
  • In families.R, add family function foo which should be a simple wrapper around .brmsfamily.
  • In stan-likelihood.R, add function stan_log_lik_foo which provides the likelihood of the family in Stan language.
  • If necessary, add self-defined Stan functions in separate files under inst/chunks.
  • Add functions posterior_predict_foo, posterior_epred_foo and log_lik_foo to posterior_predict.R, posterior_epred.R and log_lik.R, respectively.
  • If necessary, add distribution functions to distributions.R.
brms/inst/doc/brms_missings.R

params <- list(EVAL = TRUE)

## ---- SETTINGS-knitr, include=FALSE-----------------------------------------------------
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())

## ---------------------------------------------------------------------------------------
data("nhanes", package = "mice")
head(nhanes)

## ---------------------------------------------------------------------------------------
library(mice)
imp <- mice(nhanes, m = 5, print = FALSE)

## ---- results = 'hide', message = FALSE-------------------------------------------------
fit_imp1 <- brm_multiple(bmi ~ age*chl, data = imp, chains = 2)

## ---------------------------------------------------------------------------------------
summary(fit_imp1)

## ---------------------------------------------------------------------------------------
plot(fit_imp1, variable = "^b", regex = TRUE)

## ---------------------------------------------------------------------------------------
round(fit_imp1$rhats, 2)

## ---------------------------------------------------------------------------------------
conditional_effects(fit_imp1, "age:chl")

## ---- results = 'hide', message = FALSE-------------------------------------------------
bform <- bf(bmi | mi() ~ age * mi(chl)) +
  bf(chl | mi() ~ age) + set_rescor(FALSE)
fit_imp2 <- brm(bform, data = nhanes)

## ---------------------------------------------------------------------------------------
summary(fit_imp2)
conditional_effects(fit_imp2, "age:chl", resp = "bmi")

## ---------------------------------------------------------------------------------------
nhanes$se <- rexp(nrow(nhanes), 2)

## ---- results = 'hide', message = FALSE, eval = FALSE-----------------------------------
# bform <- bf(bmi | mi() ~ age * mi(chl)) +
#   bf(chl | mi(se) ~ age) + set_rescor(FALSE)
# fit_imp3 <- brm(bform, data = nhanes)

brms/inst/doc/brms_nonlinear.Rmd

---
title: "Estimating Non-Linear Models with brms"
author: "Paul Bürkner"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: yes
vignette: >
  %\VignetteIndexEntry{Estimating Non-Linear Models with brms}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
params:
  EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true")
---

```{r, SETTINGS-knitr, include=FALSE}
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())
```

## Introduction

This vignette provides an introduction on how to fit non-linear multilevel models with **brms**. Non-linear models are incredibly flexible and powerful, but require much more care with respect to model specification and priors than typical generalized linear models.
Ignoring group-level effects for the moment, the predictor term $\eta_n$ of a generalized linear model for observation $n$ can be written as follows: $$\eta_n = \sum_{i = 1}^K b_i x_{ni}$$ where $b_i$ is the regression coefficient of predictor $i$ and $x_{ni}$ is the data of predictor $i$ for observation $n$. This also compromises interaction terms and various other data transformations. However, the structure of $\eta_n$ is always linear in the sense that the regression coefficients $b_i$ are multiplied by some predictor values and then summed up. This implies that the hypothetical predictor term $$\eta_n = b_1 \exp(b_2 x_n)$$ would *not* be a *linear* predictor anymore and we could not fit it using classical techniques of generalized linear models. We thus need a more general model class, which we will call *non-linear* models. Note that the term 'non-linear' does not say anything about the assumed distribution of the response variable. In particular it does not mean 'not normally distributed' as we can apply non-linear predictor terms to all kinds of response distributions (for more details on response distributions available in **brms** see `vignette("brms_families")`). ## A Simple Non-Linear Model We begin with a simple example using simulated data. ```{r} b <- c(2, 0.75) x <- rnorm(100) y <- rnorm(100, mean = b[1] * exp(b[2] * x)) dat1 <- data.frame(x, y) ``` As stated above, we cannot use a generalized linear model to estimate $b$ so we go ahead an specify a non-linear model. ```{r, results='hide'} prior1 <- prior(normal(1, 2), nlpar = "b1") + prior(normal(0, 2), nlpar = "b2") fit1 <- brm(bf(y ~ b1 * exp(b2 * x), b1 + b2 ~ 1, nl = TRUE), data = dat1, prior = prior1) ``` When looking at the above code, the first thing that becomes obvious is that we changed the `formula` syntax to display the non-linear formula including predictors (i.e., `x`) and parameters (i.e., `b1` and `b2`) wrapped in a call to `bf`. This stands in contrast to classical **R** formulas, where only predictors are given and parameters are implicit. The argument `b1 + b2 ~ 1` serves two purposes. First, it provides information, which variables in `formula` are parameters, and second, it specifies the linear predictor terms for each parameter. In fact, we should think of non-linear parameters as placeholders for linear predictor terms rather than as parameters themselves (see also the following examples). In the present case, we have no further variables to predict `b1` and `b2` and thus we just fit intercepts that represent our estimates of $b_1$ and $b_2$ in the model equation above. The formula `b1 + b2 ~ 1` is a short form of `b1 ~ 1, b2 ~ 1` that can be used if multiple non-linear parameters share the same formula. Setting `nl = TRUE` tells **brms** that the formula should be treated as non-linear. In contrast to generalized linear models, priors on population-level parameters (i.e., 'fixed effects') are often mandatory to identify a non-linear model. Thus, **brms** requires the user to explicitly specify these priors. In the present example, we used a `normal(1, 2)` prior on (the population-level intercept of) `b1`, while we used a `normal(0, 2)` prior on (the population-level intercept of) `b2`. Setting priors is a non-trivial task in all kinds of models, especially in non-linear models, so you should always invest some time to think of appropriate priors. 
Quite often, you may be forced to change your priors after fitting a non-linear model for the first time, when you observe different MCMC chains converging to different posterior regions. This is a clear sign of an identification problem and one solution is to set stronger (i.e., more narrow) priors. To obtain summaries of the fitted model, we apply ```{r} summary(fit1) plot(fit1) plot(conditional_effects(fit1), points = TRUE) ``` The `summary` method reveals that we were able to recover the true parameter values pretty nicely. According to the `plot` method, our MCMC chains have converged well and to the same posterior. The `conditional_effects` method visualizes the model-implied (non-linear) regression line. We might be also interested in comparing our non-linear model to a classical linear model. ```{r, results='hide'} fit2 <- brm(y ~ x, data = dat1) ``` ```{r} summary(fit2) ``` To investigate and compare model fit, we can apply graphical posterior predictive checks, which make use of the **bayesplot** package on the backend. ```{r} pp_check(fit1) pp_check(fit2) ``` We can also easily compare model fit using leave-one-out cross-validation. ```{r} loo(fit1, fit2) ``` Since smaller `LOOIC` values indicate better model fit, it is immediately evident that the non-linear model fits the data better, which is of course not too surprising since we simulated the data from exactly that model. ## A Real-World Non-Linear model On his blog, Markus Gesmann predicts the growth of cumulative insurance loss payments over time, originated from different origin years (see https://www.magesblog.com/post/2015-11-03-loss-developments-via-growth-curves-and/). We will use a slightly simplified version of his model for demonstration purposes here. It looks as follows: $$cum_{AY, dev} \sim N(\mu_{AY, dev}, \sigma)$$ $$\mu_{AY, dev} = ult_{AY} \left(1 - \exp\left(- \left( \frac{dev}{\theta} \right)^\omega \right) \right)$$ The cumulative insurance payments $cum$ will grow over time, and we model this dependency using the variable $dev$. Further, $ult_{AY}$ is the (to be estimated) ultimate loss of accident each year. It constitutes a non-linear parameter in our framework along with the parameters $\theta$ and $\omega$, which are responsible for the growth of the cumulative loss and are assumed to be the same across years. The data is already shipped with brms. ```{r} data(loss) head(loss) ``` and translate the proposed model into a non-linear **brms** model. ```{r, results='hide'} fit_loss <- brm( bf(cum ~ ult * (1 - exp(-(dev/theta)^omega)), ult ~ 1 + (1|AY), omega ~ 1, theta ~ 1, nl = TRUE), data = loss, family = gaussian(), prior = c( prior(normal(5000, 1000), nlpar = "ult"), prior(normal(1, 2), nlpar = "omega"), prior(normal(45, 10), nlpar = "theta") ), control = list(adapt_delta = 0.9) ) ``` We estimate a group-level effect of accident year (variable `AY`) for the ultimate loss `ult`. This also shows nicely how a non-linear parameter is actually a placeholder for a linear predictor, which in case of `ult`, contains only an varying intercept over year. Again, priors on population-level effects are required and, for the present model, are actually mandatory to ensure identifiability. We summarize the model using well known methods. ```{r} summary(fit_loss) plot(fit_loss, N = 3, ask = FALSE) conditional_effects(fit_loss) ``` Next, we show marginal effects separately for each year. 
```{r} conditions <- data.frame(AY = unique(loss$AY)) rownames(conditions) <- unique(loss$AY) me_loss <- conditional_effects( fit_loss, conditions = conditions, re_formula = NULL, method = "predict" ) plot(me_loss, ncol = 5, points = TRUE) ``` It is evident that there is some variation in cumulative loss across accident years, for instance due to natural disasters happening only in certain years. Further, we see that the uncertainty in the predicted cumulative loss is larger for later years with fewer available data points. For a more detailed discussion of this data set, see Section 4.5 in Gesmann & Morris (2020). ## Advanced Item-Response Models As a third example, we want to show how to model more advanced item-response models using the non-linear model framework of **brms**. For simplicity, suppose we have a single forced choice item with three alternatives of which only one is correct. Our response variable is whether a person answers the item correctly (1) or not (0). Person are assumed to vary in their ability to answer the item correctly. However, every person has a 33% chance of getting the item right just by guessing. We thus simulate some data to reflect this situation. ```{r} inv_logit <- function(x) 1 / (1 + exp(-x)) ability <- rnorm(300) p <- 0.33 + 0.67 * inv_logit(ability) answer <- ifelse(runif(300, 0, 1) < p, 1, 0) dat_ir <- data.frame(ability, answer) ``` The most basic item-response model is equivalent to a simple logistic regression model. ```{r, results='hide'} fit_ir1 <- brm(answer ~ ability, data = dat_ir, family = bernoulli()) ``` However, this model completely ignores the guessing probability and will thus likely come to biased estimates and predictions. ```{r} summary(fit_ir1) plot(conditional_effects(fit_ir1), points = TRUE) ``` A more sophisticated approach incorporating the guessing probability looks as follows: ```{r, results='hide'} fit_ir2 <- brm( bf(answer ~ 0.33 + 0.67 * inv_logit(eta), eta ~ ability, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = prior(normal(0, 5), nlpar = "eta") ) ``` It is very important to set the link function of the `bernoulli` family to `identity` or else we will apply two link functions. This is because our non-linear predictor term already contains the desired link function (`0.33 + 0.67 * inv_logit`), but the `bernoulli` family applies the default `logit` link on top of it. This will of course lead to strange and uninterpretable results. Thus, please make sure that you set the link function to `identity`, whenever your non-linear predictor term already contains the desired link function. ```{r} summary(fit_ir2) plot(conditional_effects(fit_ir2), points = TRUE) ``` Comparing model fit via leave-one-out cross-validation ```{r} loo(fit_ir1, fit_ir2) ``` shows that both model fit the data equally well, but remember that predictions of the first model might still be misleading as they may well be below the guessing probability for low ability values. Now, suppose that we don't know the guessing probability and want to estimate it from the data. This can easily be done changing the previous model just a bit. ```{r, results='hide'} fit_ir3 <- brm( bf(answer ~ guess + (1 - guess) * inv_logit(eta), eta ~ 0 + ability, guess ~ 1, nl = TRUE), data = dat_ir, family = bernoulli("identity"), prior = c( prior(normal(0, 5), nlpar = "eta"), prior(beta(1, 1), nlpar = "guess", lb = 0, ub = 1) ) ) ``` Here, we model the guessing probability as a non-linear parameter making sure that it cannot exceed the interval $[0, 1]$. 
We did not estimate an intercept for `eta`, as this will lead to a bias in the estimated guessing parameter (try it out; this is an excellent example of how careful one has to be in non-linear models).

```{r}
summary(fit_ir3)
plot(fit_ir3)
plot(conditional_effects(fit_ir3), points = TRUE)
```

The results show that we are able to recover the simulated model parameters with this non-linear model. Of course, real item-response data have multiple items so that accounting for item and person variability (e.g., using a multilevel model with varying intercepts) becomes necessary as we have multiple observations per item and person. Luckily, this can all be done within the non-linear framework of **brms** and I hope that this vignette serves as a good starting point.

## References

Gesmann M. & Morris J. (2020). Hierarchical Compartmental Reserving Models. *CAS Research Papers*.

brms/inst/doc/brms_phylogenetics.R

params <- list(EVAL = TRUE)

## ---- SETTINGS-knitr, include=FALSE-----------------------------------------------------
stopifnot(require(knitr))
options(width = 90)
opts_chunk$set(
  comment = NA,
  message = FALSE,
  warning = FALSE,
  eval = if (isTRUE(exists("params"))) params$EVAL else FALSE,
  dev = "png",
  dpi = 150,
  fig.asp = 0.8,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
library(brms)
ggplot2::theme_set(theme_default())

## ---------------------------------------------------------------------------------------
phylo <- ape::read.nexus("https://paul-buerkner.github.io/data/phylo.nex")
data_simple <- read.table(
  "https://paul-buerkner.github.io/data/data_simple.txt",
  header = TRUE
)
head(data_simple)

## ---------------------------------------------------------------------------------------
A <- ape::vcv.phylo(phylo)

## ---- results='hide'--------------------------------------------------------------------
model_simple <- brm(
  phen ~ cofactor + (1|gr(phylo, cov = A)),
  data = data_simple, family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "b"),
    prior(normal(0, 50), "Intercept"),
    prior(student_t(3, 0, 20), "sd"),
    prior(student_t(3, 0, 20), "sigma")
  )
)

## ---------------------------------------------------------------------------------------
summary(model_simple)
plot(model_simple, N = 2, ask = FALSE)
plot(conditional_effects(model_simple), points = TRUE)

## ---------------------------------------------------------------------------------------
hyp <- "sd_phylo__Intercept^2 / (sd_phylo__Intercept^2 + sigma^2) = 0"
(hyp <- hypothesis(model_simple, hyp, class = NULL))
plot(hyp)

## ---------------------------------------------------------------------------------------
data_repeat <- read.table(
  "https://paul-buerkner.github.io/data/data_repeat.txt",
  header = TRUE
)
data_repeat$spec_mean_cf <-
  with(data_repeat, sapply(split(cofactor, phylo), mean)[phylo])
head(data_repeat)

## ---- results='hide'--------------------------------------------------------------------
model_repeat1 <- brm(
  phen ~ spec_mean_cf + (1|gr(phylo, cov = A)) + (1|species),
  data = data_repeat, family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0,10), "b"),
    prior(normal(0,50), "Intercept"),
    prior(student_t(3,0,20), "sd"),
    prior(student_t(3,0,20), "sigma")
  ),
  sample_prior = TRUE, chains = 2, cores = 2,
  iter = 4000, warmup = 1000
)

## ---------------------------------------------------------------------------------------
summary(model_repeat1)
## ---------------------------------------------------------------------------------------
hyp <- paste(
  "sd_phylo__Intercept^2 /",
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat1, hyp, class = NULL))
plot(hyp)

## ---------------------------------------------------------------------------------------
data_repeat$within_spec_cf <- data_repeat$cofactor - data_repeat$spec_mean_cf

## ---- results='hide'--------------------------------------------------------------------
model_repeat2 <- update(
  model_repeat1, formula = ~ . + within_spec_cf,
  newdata = data_repeat, chains = 2, cores = 2,
  iter = 4000, warmup = 1000
)

## ---------------------------------------------------------------------------------------
summary(model_repeat2)

## ---------------------------------------------------------------------------------------
hyp <- paste(
  "sd_phylo__Intercept^2 /",
  "(sd_phylo__Intercept^2 + sd_species__Intercept^2 + sigma^2) = 0"
)
(hyp <- hypothesis(model_repeat2, hyp, class = NULL))

## ---------------------------------------------------------------------------------------
data_fisher <- read.table(
  "https://paul-buerkner.github.io/data/data_effect.txt",
  header = TRUE
)
data_fisher$obs <- 1:nrow(data_fisher)
head(data_fisher)

## ---- results='hide'--------------------------------------------------------------------
model_fisher <- brm(
  Zr | se(sqrt(1 / (N - 3))) ~ 1 + (1|gr(phylo, cov = A)) + (1|obs),
  data = data_fisher, family = gaussian(),
  data2 = list(A = A),
  prior = c(
    prior(normal(0, 10), "Intercept"),
    prior(student_t(3, 0, 10), "sd")
  ),
  control = list(adapt_delta = 0.95),
  chains = 2, cores = 2, iter = 4000, warmup = 1000
)

## ---------------------------------------------------------------------------------------
summary(model_fisher)
plot(model_fisher)

## ---------------------------------------------------------------------------------------
data_pois <- read.table(
  "https://paul-buerkner.github.io/data/data_pois.txt",
  header = TRUE
)
data_pois$obs <- 1:nrow(data_pois)
head(data_pois)

## ---- results='hide'--------------------------------------------------------------------
model_pois <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)) + (1|obs),
  data = data_pois, family = poisson("log"),
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)

## ---------------------------------------------------------------------------------------
summary(model_pois)
plot(conditional_effects(model_pois), points = TRUE)

## ---- results='hide'--------------------------------------------------------------------
model_normal <- brm(
  phen_pois ~ cofactor + (1|gr(phylo, cov = A)),
  data = data_pois, family = gaussian(),
  data2 = list(A = A),
  chains = 2, cores = 2, iter = 4000,
  control = list(adapt_delta = 0.95)
)

## ---------------------------------------------------------------------------------------
summary(model_normal)

## ---------------------------------------------------------------------------------------
pp_check(model_pois)
pp_check(model_normal)

## ---------------------------------------------------------------------------------------
loo(model_pois, model_normal)