==> bayestestR/NEWS.md <==

# bayestestR 0.11.1

## New functions

* `describe_posterior()` gains a `plot()` method, which is a shortcut for `plot(estimate_density(describe_posterior()))`.

# bayestestR 0.11

## Bug fixes

* Fixed issues related to the last *brms* update.
* Fixed bug in `describe_posterior.BFBayesFactor()` where Bayes factors were missing from the output (#442).

# bayestestR 0.10.0

## Breaking

* All Bayes factors are now returned as `log(BF)` (column name `log_BF`). Printing is unaffected. To retrieve the raw BFs, you can run `exp(result$log_BF)`, as shown below.
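
A minimal sketch of this change, assuming bayestestR >= 0.10.0; the prior and posterior samples are purely illustrative:

``` r
library(bayestestR)

# Illustrative prior and posterior samples
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = 1, sd = 0.7)

result <- bayesfactor_parameters(posterior, prior, null = 0)
result # printing still shows the BF on its raw scale
exp(result$log_BF) # retrieve the raw Bayes factor from the log_BF column
```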
## New functions

* `bci()` (and its alias `bcai()`) to compute bias-corrected and accelerated bootstrap intervals. Along with this new function, `ci()` and `describe_posterior()` gain a new `ci_method` type, `"bci"`.

## Changes

* `contr.bayes` has been renamed *`contr.orthonorm`* to be more explicit about its function.

# bayestestR 0.9.0

## Breaking

* The default `ci` width has been changed to 0.95 instead of 0.89 (see [here](https://github.com/easystats/bayestestR/discussions/250)). This should not come as a surprise to long-time users of `bayestestR`, as we have been warning about this impending change for a while now :)
* Column names for `bayesfactor_restricted()` are now `p_prior` and `p_posterior` (previously `Prior_prob` and `Posterior_prob`), to be consistent with the `bayesfactor_inclusion()` output.
* Removed the experimental function `mhdior`.

## General

* Support for `blavaan` models.
* Support for `blrm` models (*rmsb*).
* Support for `BGGM` models (*BGGM*).
* `check_prior()` and `describe_prior()` should now also work for more ways of defining priors in models from *rstanarm* or *brms*.

## Bug fixes

* Fixed bug in the `print()` method for the `mediation()` function.
* Fixed remaining inconsistencies with CI values, which were not reported as fractions for `rope()`.
* Fixed issues with special prior definitions in `check_prior()`, `describe_prior()` and `simulate_prior()`.

# bayestestR 0.8.2

## General

* Support for `bamlss` models.
* Rolled back the R dependency to R >= 3.4.

## Changes to functions

* All `.stanreg` methods gain a `component` argument, to also include auxiliary parameters.

## Bug fixes

* `bayesfactor_parameters()` no longer errors for no reason when computing extremely un/likely direction hypotheses.
* `bayesfactor_pointull()` / `bf_pointull()` are now `bayesfactor_pointnull()` / `bf_pointnull()` (can *you* spot the difference? #363).

# bayestestR 0.8.0

## New functions

* `sexit()`, a function for sequential effect existence and significance testing (SEXIT).

## General

* Added a startup message to warn users that the default CI width might change in a future update.
* Added support for *mcmc.list* objects.

## Bug fixes

* `unupdate()` gains a `newdata` argument to work with `brmsfit_multiple` models.
* Fixed issue in the Bayes factors vignette (code chunks are not evaluated if packages are not available).

# bayestestR 0.7.5

## New functions

* Added an `as.matrix()` function for `bayesfactor_model` arrays.
* `unupdate()`, a utility function to get Bayesian models un-fitted from the data, representing the priors only.

## Changes to functions

* `ci()` supports `emmeans` - both Bayesian and frequentist (#312 - cross fix with `parameters`).

## Bug fixes

* Fixed issue with the *default* rope range for `BayesFactor` models.
* Fixed issue in the collinearity check in `rope()` for models with less than two parameters.
* Fixed issue in the print method for `mediation()` with `stanmvreg` models, which displayed the wrong name for the response value.
* Fixed issue in `effective_sample()` for models with only one parameter.
* `rope_range()` for `BayesFactor` models returns non-`NA` values (#343).

# bayestestR 0.7.2

## New functions

- `mediation()`, to compute average direct and average causal mediation effects of multivariate response models (`brmsfit`, `stanmvreg`).

## Bug fixes

- `bayesfactor_parameters()` works with `R < 3.6.0`.

# bayestestR 0.7.0

## General

- Preliminary support for *stanfit* objects.
- Added support for *bayesQR* objects.

## Changes to functions

- `weighted_posteriors()` can now be used with data frames.
- Revised `print()` for `describe_posterior()`.
- Improved value formatting for Bayes factor functions.

## Bug fixes

- Link transformations are now taken into account for `emmeans` objects, e.g. in `describe_posterior()`.
- Fixed `diagnostic_posterior()` when the algorithm is not "sampling".
- Minor revisions to some documentation.
- Fixed CRAN check issues for win-old-release.
# bayestestR 0.6.0

## Changes to functions

- `describe_posterior()` now also works on `effectsize::standardize_posteriors()`.
- `p_significance()` now also works on `parameters::simulate_model()`.
- `rope_range()` supports more (frequentist) models.

## Bug fixes

- Fixed issue with the `plot()` `data.frame` methods of `p_direction()` and `equivalence_test()`.
- Fixed check issues for the forthcoming *insight* update.

# bayestestR 0.5.3

## General

- Support for *bcplm* objects (package **cplm**).

## Changes to functions

- `estimate_density()` now also works on grouped data frames.

## Bug fixes

- Fixed bug in `weighted_posteriors()` to properly weight intercept-only `BFBayesFactor` models.
- Fixed bug in `weighted_posteriors()` when models have very low posterior probability (#286).
- Fixed bug in `describe_posterior()`, `rope()` and `equivalence_test()` for *brmsfit* models with monotonic effects.
- Fixed issues related to the latest changes in `as.data.frame.brmsfit()` from the *brms* package.

# bayestestR 0.5.0

## General

- Added `p_pointnull()` as an alias to `p_MAP()`.
- Added the `si()` function to compute support intervals.
- Added `weighted_posteriors()` for generating posterior samples averaged across models.
- Added a `plot()` method for `p_significance()`.
- `p_significance()` now also works for *brmsfit* objects.
- `estimate_density()` now also works for *MCMCglmm* objects.
- `equivalence_test()` gets `effects` and `component` arguments for *stanreg* and *brmsfit* models, to print specific model components.
- Support for *mcmc* objects (package **coda**).
- Provide more distributions via `distribution()`.
- Added `distribution_tweedie()`.
- Better handling of `stanmvreg` models for `describe_posterior()`, `diagnostic_posterior()` and `describe_prior()`.

## Breaking changes

- `point_estimate()`: the default value of the `centrality` argument changed from `"median"` to `"all"`.
- `p_rope()`, previously an exploratory index, was renamed to `mhdior()` (for *Max HDI inside/outside ROPE*), as `p_rope()` will refer to `rope(..., ci = 1)` (#258).

## Bug fixes

- Fixed mistake in the description of `p_significance()`.
- Fixed error when computing BFs with `emmGrid` objects based on some non-linear models (#260).
- Fixed wrong output for percentage values in `print.equivalence_test()`.
- Fixed issue in `describe_posterior()` for `BFBayesFactor` objects with more than one model.
# bayestestR 0.4.0

## New functions / features

- `convert_bayesian_to_frequentist()`: convert (refit) a Bayesian model as frequentist
- `distribution_binomial()` for perfect binomial distributions
- `simulate_ttest()`: simulate data with a mean difference
- `simulate_correlation()`: simulate correlated datasets
- `p_significance()`: compute the probability of Practical Significance (ps)
- `overlap()`: compute the overlap between two empirical distributions
- `estimate_density()`: `method = "mixture"` argument added for mixture density estimation

## Bug fixes

- Fixed bug in `simulate_prior()` for stanreg models when `autoscale` was set to `FALSE`

# bayestestR 0.3.0

## General

- Revised `print()` methods for functions like `rope()`, `p_direction()`, `describe_posterior()` etc., in particular for model objects with random effects and/or a zero-inflation component

## New functions / features

- `check_prior()` to check whether a prior is informative
- `simulate_prior()` to simulate a model's priors as distributions
- `distribution_gamma()` to generate a (near-perfect or random) Gamma distribution
- `contr.bayes` function for orthogonal factor coding (implementation from Singmann & Gronau's [`bfrms`](https://github.com/bayesstuff/bfrms/)), used for proper prior estimation when factors have 3 or more levels. See the Bayes factors vignette.

## Changes to functions

- Added support for `sim`, `sim.merMod` (from `arm::sim()`) and `MCMCglmm` objects to many functions (like `hdi()`, `ci()`, `eti()`, `rope()`, `p_direction()`, `point_estimate()`, ...)
- `describe_posterior()` gets `effects` and `component` arguments, to include the description of posterior samples from the random effects and/or the zero-inflation component.
- More user-friendly warning for non-supported models in `bayesfactor()` methods

## Bug fixes

- Fixed bug in `bayesfactor_inclusion()` where the same interaction sometimes appeared more than once (#223)
- Fixed bug in `describe_posterior()` for *stanreg* models fitted with the fullrank algorithm

# bayestestR 0.2.5

## Breaking changes

- `rope_range()` for binomial models now has a different default (`-.18, .18`, instead of `-.055, .055`)
- `rope()`: returns a proportion (between 0 and 1) instead of a value between 0 and 100
- `p_direction()`: returns a proportion (between 0.5 and 1) instead of a value between 50 and 100 ([#168](https://github.com/easystats/bayestestR/issues/168))
- `bayesfactor_savagedickey()`: the `hypothesis` argument was replaced by `null` as part of the new `bayesfactor_parameters()` function

## New functions / features

- `density_at()`, `p_map()` and `map_estimate()`: `method` argument added
- `rope()`: `ci_method` argument added
- `eti()`: computes equal-tailed intervals
- `reshape_ci()`: reshape CIs between wide/long formats
- `bayesfactor_parameters()`: new function, replacing `bayesfactor_savagedickey()`, allows for computing Bayes factors against a *point-null* or an *interval-null* (see the sketch below)
- `bayesfactor_restricted()`: function for computing Bayes factors for order-restricted models

## Bug fixes

- `bayesfactor_inclusion()` now works with `R < 3.6`.
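
A minimal sketch of the two kinds of nulls described above, assuming the 0.2.5 API; the prior and posterior samples are purely illustrative:

``` r
library(bayestestR)

# Illustrative prior and posterior samples
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = 1, sd = 0.7)

bayesfactor_parameters(posterior, prior, null = 0) # point null
bayesfactor_parameters(posterior, prior, null = c(-0.1, 0.1)) # interval null
```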
# bayestestR 0.2.2

## Breaking changes

- `equivalence_test()`: returns capitalized output (e.g., `Rejected` instead of `rejected`)
- `describe_posterior.numeric()`: `dispersion` defaults to `FALSE` for consistency with the other methods

## New functions / features

- `pd_to_p()` and `p_to_pd()`: functions to convert between the probability of direction (pd) and the p-value
- Support for `emmGrid` objects: `ci()`, `rope()`, `bayesfactor_savagedickey()`, `describe_posterior()`, ...

## Minor changes

- Improved tutorial 2

## Bug fixes

- `describe_posterior()`: fixed column order restoration
- `bayesfactor_inclusion()`: inclusion BFs for matched models are more in line with JASP results.

# bayestestR 0.2.0

## Breaking changes

- Plotting functions now require the installation of the `see` package
- The `estimate` argument in `describe_posterior()` and `point_estimate()` was renamed to `centrality`
- `hdi()`, `ci()`, `rope()` and `equivalence_test()` default `ci` to `0.89`
- `rnorm_perfect()` deprecated in favour of `distribution_normal()`
- `map_estimate()` now returns a single value instead of a data frame and the `density` parameter has been removed. The MAP density value is now accessible via `attributes(map_output)$MAP_density`

## New functions / features

- `describe_posterior()`, `describe_prior()`, `diagnostic_posterior()`: added wrapper functions
- `point_estimate()`: added function to compute point estimates
- `p_direction()`: new argument `method` to compute pd based on the AUC
- `area_under_curve()`: compute the AUC
- `distribution()` functions have been added
- `bayesfactor_savagedickey()`, `bayesfactor_models()` and `bayesfactor_inclusion()` functions have been added
- Started adding plotting methods (currently in the [`see`](https://github.com/easystats/see) package) for `p_direction()` and `hdi()`
- `probability_at()` as an alias for `density_at()`
- `effective_sample()` to return the effective sample size of Stan models
- `mcse()` to return the Monte Carlo standard error of Stan models

## Minor changes

- Improved documentation
- Improved testing
- `p_direction()`: improved printing
- `rope()` for model objects now returns the HDI values for all parameters as an attribute in a consistent way
- Changed legend labels in `plot.equivalence_test()` to align the plots with the output of the `print()` method (#78)

## Bug fixes

- `hdi()` returned multiple class attributes (#72)
- Printing results from `hdi()` failed when the `ci` argument had fractional parts for percentage values (e.g. `ci = .995`).
- `plot.equivalence_test()` did not work properly for *brms* models (#76).
# bayestestR 0.1.0

- CRAN initial publication and [0.1.0 release](https://github.com/easystats/bayestestR/releases/tag/v0.1.0)
- Added a `NEWS.md` file to track changes to the package

==> bayestestR/DESCRIPTION <==

Type: Package
Package: bayestestR
Title: Understand and Describe Bayesian Models and Posterior
        Distributions
Version: 0.11.5
Authors@R: c(
    person(given = "Dominique", family = "Makowski", role = c("aut", "cre"),
           email = "dom.makowski@gmail.com",
           comment = c(ORCID = "0000-0001-5375-9967", Twitter = "@Dom_Makowski")),
    person(given = "Daniel", family = "Lüdecke", role = "aut",
           email = "d.luedecke@uke.de",
           comment = c(ORCID = "0000-0002-8895-3206", Twitter = "@strengejacke")),
    person(given = "Mattan S.", family = "Ben-Shachar", role = "aut",
           email = "matanshm@post.bgu.ac.il",
           comment = c(ORCID = "0000-0002-4287-4801", Twitter = "@mattansb")),
    person(given = "Indrajeet", family = "Patil", role = "aut",
           email = "patilindrajeet.science@gmail.com",
           comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@patilindrajeets")),
    person(given = "Michael D.", family = "Wilson", role = "aut",
           email = "michael.d.wilson@curtin.edu.au",
           comment = c(ORCID = "0000-0003-4143-7308")),
    person(given = "Brenton M.", family = "Wiernik", role = "aut",
           email = "brenton@wiernik.org",
           comment = c(ORCID = "0000-0001-9560-6336", Twitter = "@bmwiernik")),
    person(given = "Paul-Christian", family = "Bürkner", role = "rev",
           email = "paul.buerkner@gmail.com"),
    person(given = "Tristan", family = "Mahr", role = "rev",
           email = "tristan.mahr@wisc.edu",
           comment = c(ORCID = "0000-0002-8890-5116")),
    person(given = "Henrik", family = "Singmann", role = "ctb",
           email = "singmann@gmail.com",
           comment = c(ORCID = "0000-0002-4842-3657")),
    person(given = "Quentin F.", family = "Gronau", role = "ctb",
           comment = c(ORCID = "0000-0001-5510-6943")),
    person(given = "Sam", family = "Crawley", role = "ctb",
           email = "sam@crawley.nz",
           comment = c(ORCID = "0000-0002-7847-0411")))
Maintainer: Dominique Makowski <dom.makowski@gmail.com>
Description: Provides utilities to describe posterior distributions and
    Bayesian models. It includes point-estimates such as Maximum A
    Posteriori (MAP), measures of dispersion (Highest Density Interval -
    HDI; Kruschke, 2015) and indices used for null-hypothesis testing
    (such as ROPE percentage, pd and Bayes factors).
Depends: R (>= 3.4)
Imports: insight (>= 0.14.1), datawizard (>= 0.2.0), methods, stats, utils
Suggests: BayesFactor, bayesQR, blavaan, bridgesampling, brms, dplyr,
    effectsize, emmeans, GGally, ggdist, ggplot2, ggridges, httr,
    KernSmooth, knitr, lavaan, lme4, logspline, MASS, mclust, mediation,
    modelbased, parameters, performance, rmarkdown, rstan, rstanarm, see,
    spelling, stringr, testthat, tidyr, tweedie
License: GPL-3
URL: https://easystats.github.io/bayestestR/
BugReports: https://github.com/easystats/bayestestR/issues
VignetteBuilder: knitr
Encoding: UTF-8
Language: en-US
RoxygenNote: 7.1.2
Config/testthat/edition: 3
NeedsCompilation: no
Packaged: 2021-10-26 03:12:03 UTC; domma
Author: Dominique Makowski [aut, cre] (@Dom_Makowski),
  Daniel Lüdecke [aut] (@strengejacke),
  Mattan S. Ben-Shachar [aut] (@mattansb),
  Indrajeet Patil [aut] (@patilindrajeets),
  Michael D. Wilson [aut],
  Brenton M. Wiernik [aut] (@bmwiernik),
  Paul-Christian Bürkner [rev],
  Tristan Mahr [rev],
  Henrik Singmann [ctb],
  Quentin F. Gronau [ctb],
  Sam Crawley [ctb]
Repository: CRAN
Date/Publication: 2021-10-30 14:00:02 UTC

==> bayestestR/README.md <==

# bayestestR

[![DOI](https://joss.theoj.org/papers/10.21105/joss.01541/status.svg)](https://doi.org/10.21105/joss.01541)
[![downloads](http://cranlogs.r-pkg.org/badges/bayestestR)](https://cran.r-project.org/package=bayestestR)
[![total](https://cranlogs.r-pkg.org/badges/grand-total/bayestestR)](https://cranlogs.r-pkg.org/)

***Become a Bayesian master you will***

------------------------------------------------------------------------

:warning: We changed the default CI width! Please make an [informed decision](https://easystats.github.io/bayestestR/articles/credible_interval.html) and set it explicitly (`ci = 0.89`, `ci = 0.95`, or anything else that you decide) :warning:

------------------------------------------------------------------------

Existing R packages allow users to easily fit a large variety of models and extract and visualize the posterior draws. However, most of these packages only return a limited set of indices (e.g., point-estimates and CIs). **bayestestR** provides a comprehensive and consistent set of functions to analyze and describe posterior distributions generated by a variety of model objects, including popular modeling packages such as **rstanarm**, **brms** or **BayesFactor**.

You can reference the package and its documentation as follows:

- Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. [10.21105/joss.01541](https://doi.org/10.21105/joss.01541)
- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology, 10, 2767.
  [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

## Installation

[![CRAN](http://www.r-pkg.org/badges/version/bayestestR)](https://cran.r-project.org/package=bayestestR)
![Tests](https://github.com/easystats/bayestestR/workflows/Tests/badge.svg)

Run the following to install the stable release of **bayestestR** from CRAN:

``` r
install.packages("bayestestR")
```

Or this one to install the latest development version:

``` r
install.packages("remotes")
remotes::install_github("easystats/bayestestR")
```

## Documentation

[![Documentation](https://img.shields.io/badge/documentation-bayestestR-orange.svg?colorB=E91E63)](https://easystats.github.io/bayestestR/)
[![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/)
[![Features](https://img.shields.io/badge/features-bayestestR-orange.svg?colorB=2196F3)](https://easystats.github.io/bayestestR/reference/index.html)

Click on the buttons above to access the package [**documentation**](https://easystats.github.io/bayestestR/) and the [**easystats blog**](https://easystats.github.io/blog/posts/), and check out these vignettes:

#### Tutorials

- [Get Started with Bayesian Analysis](https://easystats.github.io/bayestestR/articles/bayestestR.html)
- [Example 1: Initiation to Bayesian models](https://easystats.github.io/bayestestR/articles/example1.html)
- [Example 2: Confirmation of Bayesian skills](https://easystats.github.io/bayestestR/articles/example2.html)
- [Example 3: Become a Bayesian master](https://easystats.github.io/bayestestR/articles/example3.html)

#### Articles

- [Credible Intervals (CI)](https://easystats.github.io/bayestestR/articles/credible_interval.html)
- [Probability of Direction (pd)](https://easystats.github.io/bayestestR/articles/probability_of_direction.html)
- [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html)
- [Bayes Factors (BF)](https://easystats.github.io/bayestestR/articles/bayes_factors.html)
- [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html)
- [Comparison of Indices of Effect Existence](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html)
- [Reporting Guidelines](https://easystats.github.io/bayestestR/articles/guidelines.html)

# Features

In the Bayesian framework, parameters are estimated in a probabilistic fashion as *distributions*. These distributions can be summarised and described by **reporting 4 types of indices**:

- [**Centrality**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html)
  - `mean()`, `median()` or [**`map_estimate()`**](https://easystats.github.io/bayestestR/reference/map_estimate.html) for an estimation of the mode.
  - [**`point_estimate()`**](https://easystats.github.io/bayestestR/reference/point_estimate.html) can be used to get them at once and can be run directly on models.
- [**Uncertainty**](https://easystats.github.io/bayestestR/articles/credible_interval.html)
  - [**`hdi()`**](https://easystats.github.io/bayestestR/reference/hdi.html) for *Highest Density Intervals (HDI)* or [**`eti()`**](https://easystats.github.io/bayestestR/reference/eti.html) for *Equal-Tailed Intervals (ETI)*.
  - [**`ci()`**](https://easystats.github.io/bayestestR/reference/ci.html) can be used as a general method for Confidence and Credible Intervals (CI).
- [**Effect Existence**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether an effect is different from 0.
  - [**`p_direction()`**](https://easystats.github.io/bayestestR/reference/p_direction.html) for a Bayesian equivalent of the frequentist *p*-value (see [Makowski et al., 2019](https://doi.org/10.3389/fpsyg.2019.02767)).
  - [**`p_pointnull()`**](https://easystats.github.io/bayestestR/reference/p_map.html) represents the odds of the null hypothesis (*h0 = 0*) compared to the most likely hypothesis (the MAP).
  - [**`bf_pointnull()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) for a classic *Bayes Factor (BF)* assessing the likelihood of effect presence against its absence (*h0 = 0*).
- [**Effect Significance**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether the effect size can be considered as non-negligible.
  - [**`p_rope()`**](https://easystats.github.io/bayestestR/reference/p_rope.html) is the probability of the effect falling inside a [*Region of Practical Equivalence (ROPE)*](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html).
  - [**`bf_rope()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes a Bayes factor against the null as defined by a region (the ROPE).
  - [**`p_significance()`**](https://easystats.github.io/bayestestR/reference/p_significance.html) combines a region of equivalence with the probability of direction.

[**`describe_posterior()`**](https://easystats.github.io/bayestestR/reference/describe_posterior.html) is the master function with which you can compute all of the indices cited above at once.

``` r
describe_posterior(
  rnorm(10000),
  centrality = "median",
  test = c("p_direction", "p_significance")
)
## Summary of Posterior Distribution
## 
## Parameter | Median |        95% CI |     pd |   ps
## --------------------------------------------------
## Posterior |  -0.01 | [-2.01, 1.93] | 50.31% | 0.46
```

`describe_posterior()` works for many objects, including more complex *brmsfit* models.
For better readability, the output is separated by model components:

``` r
zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv")
set.seed(123)
model <- brm(
  bf(
    count ~ child + camper + (1 | persons),
    zi ~ child + camper + (1 | persons)
  ),
  data = zinb,
  family = zero_inflated_poisson(),
  chains = 1,
  iter = 500
)

describe_posterior(
  model,
  effects = "all",
  component = "all",
  test = c("p_direction", "p_significance"),
  centrality = "all"
)
```

    ## Summary of Posterior Distribution
    ## 
    ## # Fixed effects (conditional)
    ## 
    ## Parameter   | Median |  Mean |   MAP |         95% CI |     pd |   ps |  Rhat |    ESS
    ## --------------------------------------------------------------------------------------
    ## (Intercept) |   0.96 |  0.96 |  0.96 | [-0.64,  2.74] | 90.00% | 0.88 | 1.011 | 110.00
    ## child       |  -1.16 | -1.16 | -1.16 | [-1.39, -0.97] |   100% | 1.00 | 0.996 | 278.00
    ## camper      |   0.73 |  0.72 |  0.73 | [ 0.51,  0.89] |   100% | 1.00 | 0.996 | 271.00
    ## 
    ## # Fixed effects (zero-inflated)
    ## 
    ## Parameter   | Median |  Mean |   MAP |         95% CI |     pd |   ps |  Rhat |    ESS
    ## --------------------------------------------------------------------------------------
    ## (Intercept) |  -0.48 | -0.51 | -0.22 | [-1.75,  1.16] | 78.00% | 0.73 | 0.997 | 138.00
    ## child       |   1.85 |  1.86 |  1.81 | [ 1.28,  2.61] |   100% | 1.00 | 0.996 | 303.00
    ## camper      |  -0.88 | -0.86 | -0.99 | [-1.61, -0.04] | 98.40% | 0.96 | 0.996 | 292.00
    ## 
    ## # Random effects (conditional) Intercept: persons
    ## 
    ## Parameter |    Median |  Mean |   MAP |         95% CI |     pd |   ps |  Rhat |    ESS
    ## ---------------------------------------------------------------------------------------
    ## persons.1 |     -0.99 | -1.01 | -0.84 | [-2.71,  0.78] | 92.00% | 0.90 | 1.007 | 106.00
    ## persons.2 | -4.65e-03 | -0.04 |  0.03 | [-1.73,  1.54] | 50.00% | 0.45 | 1.013 | 109.00
    ## persons.3 |      0.69 |  0.66 |  0.69 | [-1.06,  2.28] | 79.60% | 0.78 | 1.010 | 114.00
    ## persons.4 |      1.57 |  1.56 |  1.56 | [-0.22,  3.12] | 96.80% | 0.96 | 1.009 | 114.00
    ## 
    ## # Random effects (zero-inflated) Intercept: persons
    ## 
    ## Parameter | Median |  Mean |   MAP |         95% CI |     pd |   ps |  Rhat |    ESS
    ## ------------------------------------------------------------------------------------
    ## persons.1 |   1.10 |  1.11 |  1.08 | [-0.29,  2.73] | 94.80% | 0.93 | 0.997 | 166.00
    ## persons.2 |   0.18 |  0.18 |  0.22 | [-0.94,  1.60] | 63.20% | 0.54 | 0.996 | 154.00
    ## persons.3 |  -0.30 | -0.31 | -0.54 | [-1.54,  1.17] | 64.00% | 0.59 | 0.997 | 154.00
    ## persons.4 |  -1.45 | -1.46 | -1.44 | [-3.03, -0.13] | 98.00% | 0.97 | 1.000 | 189.00
    ## 
    ## # Random effects (conditional) SD/Cor: persons
    ## 
    ## Parameter   | Median | Mean |  MAP |        95% CI |   pd |   ps |  Rhat |    ESS
    ## ----------------------------------------------------------------------------------
    ## (Intercept) |   1.42 | 1.58 | 1.07 | [ 0.60, 3.07] | 100% | 1.00 | 1.010 | 126.00
    ## 
    ## # Random effects (zero-inflated) SD/Cor: persons
    ## 
    ## Parameter   | Median | Mean |  MAP |        95% CI |   pd |   ps |  Rhat |    ESS
    ## ----------------------------------------------------------------------------------
    ## (Intercept) |   1.30 | 1.49 | 0.99 | [ 0.47, 2.90] | 100% | 1.00 | 0.996 | 129.00

*bayestestR* also includes [**many other features**](https://easystats.github.io/bayestestR/reference/index.html) useful for your Bayesian analyses.
Here are some more examples:

## Point-estimates

``` r
library(bayestestR)

posterior <- distribution_gamma(10000, 1.5) # Generate a skewed distribution
centrality <- point_estimate(posterior) # Get indices of centrality
centrality
## Point Estimate
## 
## Median | Mean |  MAP
## --------------------
##   1.18 | 1.50 | 0.51
```

As for other [**easystats**](https://github.com/easystats) packages, `plot()` methods are available from the [**see**](https://easystats.github.io/see/) package for many functions:

![](man/figures/unnamed-chunk-9-1.png)

While the **median** and the **mean** are available through base R functions, [**`map_estimate()`**](https://easystats.github.io/bayestestR/reference/map_estimate.html) in *bayestestR* can be used to directly find the **Highest Maximum A Posteriori (MAP)** estimate of a posterior, *i.e.*, the value associated with the highest probability density (the "peak" of the posterior distribution). In other words, it is an estimation of the *mode* for continuous parameters.

## Uncertainty (CI)

[**`hdi()`**](https://easystats.github.io/bayestestR/reference/hdi.html) computes the **Highest Density Interval (HDI)** of a posterior distribution, i.e., the interval within which all points have a higher probability density than points outside it. The HDI can be used in the context of Bayesian posterior characterization as a **Credible Interval (CI)**.

Unlike equal-tailed intervals (see [`eti()`](https://easystats.github.io/bayestestR/reference/eti.html)) that typically exclude 2.5% from each tail of the distribution, the HDI is *not* equal-tailed and therefore always includes the mode(s) of posterior distributions.

By default, `hdi()` returns the 89% intervals (`ci = 0.89`), deemed to be more stable than, for instance, 95% intervals. An effective sample size of at least 10,000 is recommended if 95% intervals should be computed (Kruschke, 2015). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable 95% threshold (McElreath, 2018).

``` r
posterior <- distribution_chisquared(10000, 4)

hdi(posterior, ci = .89)
## 89% HDI: [0.18, 7.63]

eti(posterior, ci = .89)
## 89% ETI: [0.75, 9.25]
```

## Existence and Significance Testing

### Probability of Direction (*pd*)

[**`p_direction()`**](https://easystats.github.io/bayestestR/reference/p_direction.html) computes the **Probability of Direction** (***p*d**, also known as the Maximum Probability of Effect - *MPE*). It varies between 50% and 100% (*i.e.*, `0.5` and `1`) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). It is mathematically defined as the proportion of the posterior distribution that is of the median's sign. Although differently expressed, this index is fairly similar (*i.e.*, is strongly correlated) to the frequentist ***p*-value**.

**Relationship with the p-value**: In most cases, it seems that the *pd* corresponds to the frequentist one-sided *p*-value through the formula `p-value = (1 - pd/100)` and to the two-sided *p*-value (the most commonly reported) through the formula `p-value = 2 * (1 - pd/100)`. Thus, a `pd` of `95%`, `97.5%`, `99.5%` and `99.95%` corresponds approximately to a two-sided *p*-value of respectively `.1`, `.05`, `.01` and `.001`.
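
A minimal sketch of this correspondence, using `pd_to_p()` (here `pd` is expressed as a proportion, as returned by `p_direction()`); the posterior is purely illustrative:

``` r
library(bayestestR)

posterior <- distribution_normal(10000, 0.4, 0.2)
pd <- as.numeric(p_direction(posterior))

2 * (1 - pd) # manual two-sided conversion
pd_to_p(pd) # same conversion via bayestestR (two-sided by default)
```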
See the [*reporting guidelines*](https://easystats.github.io/bayestestR/articles/guidelines.html).

``` r
posterior <- distribution_normal(10000, 0.4, 0.2)
p_direction(posterior)
## Probability of Direction: 0.98
```

![](man/figures/unnamed-chunk-13-1.png)

### ROPE

[**`rope()`**](https://easystats.github.io/bayestestR/reference/rope.html) computes the proportion (in percentage) of the HDI (defaulting to the 89% HDI) of a posterior distribution that lies within a region of practical equivalence.

Statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point is infinite). Therefore, the idea underlying the ROPE is to let the user define an area around the null value enclosing values that are *equivalent to the null* value for practical purposes (Kruschke, 2018).

Kruschke suggests that such a null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: for instance, for linear models, the ROPE could be set as `0 +/- .1 * sd(y)`. This ROPE range can be automatically computed for models using the [rope_range](https://easystats.github.io/bayestestR/reference/rope_range.html) function.

Kruschke suggests using the proportion of the 95% (or 90%, considered more stable) HDI that falls within the ROPE as an index for "null-hypothesis" testing (as understood under the Bayesian framework, see [equivalence_test](https://easystats.github.io/bayestestR/reference/equivalence_test.html) and the sketch below).

``` r
posterior <- distribution_normal(10000, 0.4, 0.2)
rope(posterior, range = c(-0.1, 0.1))
## # Proportion of samples inside the ROPE [-0.10, 0.10]:
## 
## inside ROPE
## -----------
## 4.41 %
```

![](man/figures/unnamed-chunk-15-1.png)
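
A minimal sketch of such a ROPE-based test via `equivalence_test()`, using the same illustrative posterior as above:

``` r
library(bayestestR)

posterior <- distribution_normal(10000, 0.4, 0.2)
equivalence_test(posterior, range = c(-0.1, 0.1), ci = 0.95)
```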
### Bayes Factor

[**`bayesfactor_parameters()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes Bayes factors against the null (either a point or an interval), based on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data.

When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null; when the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers, Lodewyckx, Kuriyal, & Grasman, 2010).

``` r
prior <- distribution_normal(10000, mean = 0, sd = 1)
posterior <- distribution_normal(10000, mean = 1, sd = 0.7)

bayesfactor_parameters(posterior, prior, direction = "two-sided", null = 0)
## Bayes Factor (Savage-Dickey density ratio)
## 
## BF  
## ----
## 1.95
## 
## * Evidence Against The Null: 0
```

![](man/figures/unnamed-chunk-17-1.png)

*The lollipops represent the density of a point-null on the prior distribution (the blue lollipop on the dotted distribution) and on the posterior distribution (the red lollipop on the yellow distribution). The ratio between the two - the Savage-Dickey ratio - indicates the degree by which the mass of the parameter distribution has shifted away from or closer to the null.*

For more info, see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).

## Utilities

### Find ROPE's appropriate range

[**`rope_range()`**](https://easystats.github.io/bayestestR/reference/rope_range.html): This function attempts to automatically find suitable "default" values for the Region Of Practical Equivalence (ROPE). Kruschke (2018) suggests that such a null value could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988), which can be generalised for linear models to `-0.1 * sd(y), 0.1 * sd(y)`. For logistic models, the parameters expressed in log odds ratio can be converted to standardized difference through the formula `pi / sqrt(3)`, resulting in a range of `-0.18` to `0.18`.

``` r
rope_range(model)
```

### Density Estimation

[**`estimate_density()`**](https://easystats.github.io/bayestestR/reference/estimate_density.html): This function is a wrapper over different methods of density estimation. By default, it uses the base R `density()` function, but with a different smoothing bandwidth (`"SJ"`) than the legacy default of `density()` (`"nrd0"`). However, Deng & Wickham suggest that `method = "KernSmooth"` is the fastest and the most accurate (see the short sketch at the end of this section).

### Perfect Distributions

[**`distribution()`**](https://easystats.github.io/bayestestR/reference/distribution.html): Generate a sample of size n with near-perfect distributions.

``` r
distribution(n = 10)
## [1] -1.28 -0.88 -0.59 -0.34 -0.11 0.11 0.34 0.59 0.88 1.28
```

### Probability of a Value

[**`density_at()`**](https://easystats.github.io/bayestestR/reference/density_at.html): Compute the density of a given point of a distribution.

``` r
density_at(rnorm(1000, 1, 1), 1)
## [1] 0.39
```
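
A minimal sketch comparing the density estimation methods mentioned above; the data and the use of `head()` to peek at the returned data frame are illustrative:

``` r
library(bayestestR)

posterior <- distribution_normal(1000, 0.4, 0.2)

head(estimate_density(posterior, method = "kernel")) # base R density(), bw = "SJ"
head(estimate_density(posterior, method = "KernSmooth")) # via the KernSmooth package
```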
# References

Kruschke, J. K., & Liddell, T. M. (2018). The Bayesian new statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a Bayesian perspective. *Psychonomic Bulletin & Review*, *25*(1), 178–206.
McElreath, R. (2018). *Statistical rethinking: A Bayesian course with examples in R and Stan*. Chapman & Hall/CRC.
Wagenmakers, E.-J., Lodewyckx, T., Kuriyal, H., & Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. *Cognitive Psychology*, *60*(3), 158–189.
==> bayestestR/man/simulate_simpson.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_simpson.R
\name{simulate_simpson}
\alias{simulate_simpson}
\title{Simpson's paradox dataset simulation}
\usage{
simulate_simpson(
  n = 100,
  r = 0.5,
  groups = 3,
  difference = 1,
  group_prefix = "G_"
)
}
\arguments{
\item{n}{The number of observations for each group to be generated (minimum 4).}

\item{r}{A value or vector corresponding to the desired correlation coefficients.}

\item{groups}{Number of groups (groups can be participants, clusters, anything).}

\item{difference}{Difference between groups.}

\item{group_prefix}{The prefix of the group name (e.g., "G_1", "G_2", "G_3", ...).}
}
\value{
A dataset.
}
\description{
Simpson's paradox, or the Yule-Simpson effect, is a phenomenon in probability and statistics, in which a trend appears in several different groups of data but disappears or reverses when these groups are combined.
}
\examples{
data <- simulate_simpson(n = 10, groups = 5, r = 0.5)

if (require("ggplot2")) {
  ggplot(data, aes(x = V1, y = V2)) +
    geom_point(aes(color = Group)) +
    geom_smooth(aes(color = Group), method = "lm") +
    geom_smooth(method = "lm")
}
}

==> bayestestR/man/dot-flatten_list.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_flatten_list.R
\name{.flatten_list}
\alias{.flatten_list}
\title{Flatten a list}
\usage{
.flatten_list(object, name = "name")
}
\arguments{
\item{object}{A list.}

\item{name}{Name of the column of keys in case the output is a data frame.}
}
\description{
Flatten a list
}
\keyword{internal}

==> bayestestR/man/rope_range.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rope_range.R
\name{rope_range}
\alias{rope_range}
\alias{rope_range.default}
\title{Find Default Equivalence (ROPE) Region Bounds}
\usage{
rope_range(x, ...)

\method{rope_range}{default}(x, verbose = TRUE, ...)
}
\arguments{
\item{x}{A \code{stanreg}, \code{brmsfit} or \code{BFBayesFactor} object.}

\item{...}{Currently not used.}

\item{verbose}{Toggle warnings.}
}
\description{
This function attempts to automatically find suitable "default" values for the Region Of Practical Equivalence (ROPE).
}
\details{
\cite{Kruschke (2018)} suggests that the region of practical equivalence could be set, by default, to a range from \code{-0.1} to \code{0.1} of a standardized parameter (negligible effect size according to Cohen, 1988).
\itemize{
\item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{[-0.1 * SDy, 0.1 * SDy]}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}.
\item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{&pi;/&radic;3}}{\eqn{\pi/\sqrt{3}}}, resulting in a range of \code{-0.18} to \code{0.18}.
\item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models.
\item For models from \strong{count data}, the residual variance is used. This is a rather experimental threshold and is probably often similar to \code{-0.1, 0.1}, but should be used with care!
\item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above).
\item For \strong{correlations}, \code{-0.05, 0.05} is used, i.e., half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb.
\item For all other models, \code{-0.1, 0.1} is used to determine the ROPE limits, but it is strongly advised to specify it manually.
}
}
\examples{
\dontrun{
if (require("rstanarm")) {
  model <- stan_glm(
    mpg ~ wt + gear,
    data = mtcars,
    chains = 2,
    iter = 200,
    refresh = 0
  )
  rope_range(model)

  model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0)
  rope_range(model)
}

if (require("brms")) {
  model <- brm(mpg ~ wt + cyl, data = mtcars)
  rope_range(model)
}

if (require("BayesFactor")) {
  model <- ttestBF(mtcars[mtcars$vs == 1, "mpg"], mtcars[mtcars$vs == 0, "mpg"])
  rope_range(model)

  model <- lmBF(mpg ~ vs, data = mtcars)
  rope_range(model)
}
}
}
\references{
Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}.
}

==> bayestestR/man/simulate_correlation.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_data.R
\name{simulate_correlation}
\alias{simulate_correlation}
\alias{simulate_ttest}
\alias{simulate_difference}
\title{Data Simulation}
\usage{
simulate_correlation(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...)

simulate_ttest(n = 100, d = 0.5, names = NULL, ...)

simulate_difference(n = 100, d = 0.5, names = NULL, ...)
}
\arguments{
\item{n}{The number of observations to be generated.}

\item{r}{A value or vector corresponding to the desired correlation coefficients.}

\item{mean}{A value or vector corresponding to the mean of the variables.}

\item{sd}{A value or vector corresponding to the SD of the variables.}

\item{names}{A character vector of desired variable names.}

\item{...}{Arguments passed to or from other methods.}

\item{d}{A value or vector corresponding to the desired difference between the groups.}
}
\description{
Simulate data with specific characteristics.
}
\examples{
# Correlation --------------------------------
data <- simulate_correlation(r = 0.5)
plot(data$V1, data$V2)
cor.test(data$V1, data$V2)
summary(lm(V2 ~ V1, data = data))

# Specify mean and SD
data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7))
cor.test(data$V1, data$V2)
round(c(mean(data$V1), sd(data$V1)), 1)
round(c(mean(data$V2), sd(data$V2)), 1)
summary(lm(V2 ~ V1, data = data))

# Generate multiple variables
cor_matrix <- matrix(c(
  1.0, 0.2, 0.4,
  0.2, 1.0, 0.3,
  0.4, 0.3, 1.0
),
nrow = 3
)

data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2"))
cor(data)
summary(lm(y ~ x1, data = data))

# t-test --------------------------------
data <- simulate_ttest(n = 30, d = 0.3)
plot(data$V1, data$V0)
round(c(mean(data$V1), sd(data$V1)), 1)
diff(t.test(data$V1 ~ data$V0)$estimate)
summary(lm(V1 ~ V0, data = data))
summary(glm(V0 ~ V1, data = data, family = "binomial"))

# Difference --------------------------------
data <- simulate_difference(n = 30, d = 0.3)
plot(data$V1, data$V0)
round(c(mean(data$V1), sd(data$V1)), 1)
diff(t.test(data$V1 ~ data$V0)$estimate)
summary(lm(V1 ~ V0, data = data))
summary(glm(V0 ~ V1, data = data, family = "binomial"))
}

==> bayestestR/man/overlap.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/overlap.R
\name{overlap}
\alias{overlap}
\title{Overlap Coefficient}
\usage{
overlap(
  x,
  y,
  method_density = "kernel",
  method_auc = "trapezoid",
  precision = 2^10,
  extend = TRUE,
  extend_scale = 0.1,
  ...
)
}
\arguments{
\item{x}{Vector of x values.}

\item{y}{Vector of y values.}

\item{method_density}{Density estimation method. See \code{\link[=estimate_density]{estimate_density()}}.}

\item{method_auc}{Area Under the Curve (AUC) estimation method. See \code{\link[=area_under_curve]{area_under_curve()}}.}

\item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.}

\item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.}

\item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.}

\item{...}{Currently not used.}
}
\description{
A method to calculate the overlap coefficient between two empirical distributions (which can be used as a measure of similarity between two samples).
}
\examples{
library(bayestestR)

x <- distribution_normal(1000, 2, 0.5)
y <- distribution_normal(1000, 0, 1)

overlap(x, y)
plot(overlap(x, y))
}

==> bayestestR/man/unupdate.Rd <==

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unupdate.R
\name{unupdate}
\alias{unupdate}
\alias{unupdate.stanreg}
\alias{unupdate.brmsfit}
\alias{unupdate.brmsfit_multiple}
\alias{unupdate.blavaan}
\title{Un-update Bayesian models to their prior-to-data state}
\usage{
unupdate(model, verbose = TRUE, ...)

\method{unupdate}{stanreg}(model, verbose = TRUE, ...)

\method{unupdate}{brmsfit}(model, verbose = TRUE, ...)

\method{unupdate}{brmsfit_multiple}(model, verbose = TRUE, newdata = NULL, ...)

\method{unupdate}{blavaan}(model, verbose = TRUE, ...)
}
\arguments{
\item{model}{A fitted Bayesian model.}

\item{verbose}{Toggle warnings.}

\item{...}{Not used.}

\item{newdata}{List of \code{data.frames} to update the model with new data.
Required even if the original data should be used.} } \value{ A model un-fitted to the data, representing the prior model. } \description{ As posteriors are priors that have been updated after observing some data, the goal of this function is to un-update the posteriors to obtain models representing the priors. These models can then be used to examine the prior predictive distribution, or to compare priors with posteriors. \cr\cr This function is used internally to compute Bayes factors. } \keyword{internal} bayestestR/man/as.numeric.p_direction.Rd0000644000175000017500000000121714023530470020144 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R, R/p_direction.R, R/p_map.R, % R/p_significance.R \name{as.numeric.map_estimate} \alias{as.numeric.map_estimate} \alias{as.numeric.p_direction} \alias{as.numeric.p_map} \alias{as.numeric.p_significance} \title{Convert to Numeric} \usage{ \method{as.numeric}{map_estimate}(x, ...) \method{as.numeric}{p_direction}(x, ...) \method{as.numeric}{p_map}(x, ...) \method{as.numeric}{p_significance}(x, ...) } \arguments{ \item{x}{object to be coerced or tested.} \item{...}{further arguments passed to or from other methods.} } \description{ Convert to Numeric } bayestestR/man/diagnostic_posterior.Rd0000644000175000017500000001136614101116425020037 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostic_posterior.R \name{diagnostic_posterior} \alias{diagnostic_posterior} \alias{diagnostic_posterior.stanreg} \alias{diagnostic_posterior.stanmvreg} \alias{diagnostic_posterior.brmsfit} \title{Posteriors Sampling Diagnostic} \usage{ diagnostic_posterior(posteriors, diagnostic = c("ESS", "Rhat"), ...) \method{diagnostic_posterior}{stanreg}( posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{diagnostic_posterior}{stanmvreg}( posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{diagnostic_posterior}{brmsfit}( posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{posteriors}{A stanreg or brms model.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{...}{Currently not used.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Which type of parameters should be returned, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables, or the marginal effects? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions), or models with marginal effects from \pkg{mfx}. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters.
If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned.} } \description{ Extract diagnostic metrics (Effective Sample Size (\code{ESS}), \code{Rhat} and Monte Carlo Standard Error \code{MCSE}). } \details{ \strong{Effective Sample Size (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\cite{Kruschke 2015, p182-3}). \cr \cr \strong{Rhat} should be as close to 1 as possible. It should not be larger than 1.1 (\cite{Gelman and Rubin, 1992}) or 1.01 (\cite{Vehtari et al., 2019}). The split Rhat statistic quantifies the consistency of an ensemble of Markov chains. \cr \cr \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. } \examples{ \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm", quietly = TRUE)) { model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) diagnostic_posterior(model) } # brms models # ----------------------------------------------- if (require("brms", quietly = TRUE)) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) diagnostic_posterior(model) } } } \references{ \itemize{ \item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472. \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., \& Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } } bayestestR/man/density_at.Rd0000644000175000017500000000165113672421177015753 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{density_at} \alias{density_at} \title{Density Probability at a Given Value} \usage{ density_at(posterior, x, precision = 2^10, method = "kernel", ...) } \arguments{ \item{posterior}{Vector representing a posterior distribution.} \item{x}{The value of which to get the approximate probability.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method.
Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} } \description{ Compute the density value at a given point of a distribution (i.e., the value of the \code{y} axis of a value \code{x} of a distribution). } \examples{ library(bayestestR) posterior <- distribution_normal(n = 10) density_at(posterior, 0) density_at(posterior, c(0, 1)) } bayestestR/man/bayesfactor_parameters.Rd0000644000175000017500000002705214101116425020321 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_parameters.R \name{bayesfactor_parameters} \alias{bayesfactor_parameters} \alias{bayesfactor_pointnull} \alias{bayesfactor_rope} \alias{bf_parameters} \alias{bf_pointnull} \alias{bf_rope} \alias{bayesfactor_parameters.numeric} \alias{bayesfactor_parameters.stanreg} \alias{bayesfactor_parameters.brmsfit} \alias{bayesfactor_parameters.blavaan} \alias{bayesfactor_parameters.data.frame} \title{Bayes Factors (BF) for a Single Parameter} \usage{ bayesfactor_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bayesfactor_pointnull( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bayesfactor_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ... ) bf_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bf_pointnull( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bf_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ... ) \method{bayesfactor_parameters}{numeric}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) \method{bayesfactor_parameters}{stanreg}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "smooth_terms", "sigma", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{bayesfactor_parameters}{brmsfit}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "smooth_terms", "sigma", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{bayesfactor_parameters}{blavaan}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) \method{bayesfactor_parameters}{data.frame}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) } \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see 'Details').} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for a interval-null).} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to and from other methods. (Can be used to pass arguments to internal \code{\link[logspline:logspline]{logspline::logspline()}}.)} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. 
May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame containing the (log) Bayes factor representing evidence \emph{against} the null. } \description{ This method computes Bayes factors against the null (either a point or an interval), based on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data. \cr \cr When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null interval (Morey & Rouder, 2011; Liao et al., 2020); when the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers et al., 2010; Heck, 2019). \cr \cr Note that the \code{logspline} package is used for estimating densities and probabilities, and must be installed for the function to work. \cr \cr \code{bayesfactor_pointnull()} and \code{bayesfactor_rope()} are wrappers around \code{bayesfactor_parameters} with different defaults for the null to be tested against (a point and a range, respectively). Aliases of the main functions are prefixed with \verb{bf_*}, like \code{bf_parameters()} or \code{bf_pointnull()}. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors based on prior and posterior distributions. \subsection{One-sided & Dividing Tests (setting an order restriction)}{ One-sided tests (controlled by \code{direction}) are conducted by restricting the prior and posterior of the non-null values (the "alternative") to one side of the null only (\cite{Morey & Wagenmakers, 2014}). For example, if we have a prior hypothesis that the parameter should be positive, the alternative will be restricted to the region to the right of the null (point or interval). For example, for a Bayes factor comparing the "null" of \code{0-0.1} to the alternative \verb{>0.1}, we would set \code{bayesfactor_parameters(null = c(0, 0.1), direction = ">")}. \cr\cr It is also possible to compute a Bayes factor for \strong{dividing} hypotheses - that is, for a null and alternative that are complementary, opposing one-sided hypotheses (\cite{Morey & Wagenmakers, 2014}). For example, for a Bayes factor comparing the "null" of \verb{<0} to the alternative \verb{>0}, we would set \code{bayesfactor_parameters(null = c(-Inf, 0))}.
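For instance, a minimal sketch with simulated prior and posterior draws (the resulting values are illustrative only, and the \code{logspline} package must be installed): \preformatted{prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3)

# One-sided test: alternative restricted to the right of the null
bayesfactor_parameters(posterior, prior, direction = "right", null = 0)

# Dividing hypotheses: null of < 0 vs. alternative of > 0
bayesfactor_parameters(posterior, prior, null = c(-Inf, 0))}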
} } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); as the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results. \itemize{ \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. See \code{\link[=unupdate]{unupdate()}}. \item \strong{Note:} When \code{posterior} is a \code{brmsfit_multiple} model, \code{prior} \strong{must} be provided. } \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of priors samples \emph{only}. \item \strong{Note:} When the \code{emmGrid} has undergone any transformations (\code{"log"}, \code{"response"}, etc.), or \code{regrid}ing, then \code{prior} must be an \code{emmGrid} object, as stated above. } } } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null; one convention is that a Bayes factor greater than 3 can be considered "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}).
} \examples{ library(bayestestR) if (require("logspline")) { prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) bayesfactor_parameters(posterior, prior) } \dontrun{ # rstanarm models # --------------- if (require("rstanarm") && require("emmeans") && require("logspline")) { contrasts(sleep$group) <- contr.orthonorm # see vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) bayesfactor_parameters(stan_model) bayesfactor_parameters(stan_model, null = rope_range(stan_model)) # emmGrid objects # --------------- group_diff <- pairs(emmeans(stan_model, ~group)) bayesfactor_parameters(group_diff, prior = stan_model) } # brms models # ----------- if (require("brms")) { contrasts(sleep$group) <- contr.orthonorm # see vignette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors ) bayesfactor_parameters(brms_model) } } } \references{ \itemize{ \item Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. Cognitive psychology, 60(3), 158-189. \item Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The case of computing Bayes factors for regression parameters. British Journal of Mathematical and Statistical Psychology, 72(2), 316-333. \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. \item Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting the Bayes factor and a modified ROPE procedure for testing interval null hypotheses. The American Statistician, 1-19. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. Ben-Shachar } bayestestR/man/cwi.Rd0000644000175000017500000000654314135670552014373 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cwi.R \name{cwi} \alias{cwi} \alias{cwi.data.frame} \title{Curvewise Intervals (CWI)} \usage{ cwi(x, ...) \method{cwi}{data.frame}(x, ci = 0.95, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters.
} } \description{ Compute the \strong{Curvewise interval (CWI)} (also called the "simultaneous interval" or "joint interval") of posterior distributions using \code{ggdist::curve_interval()}. Whereas the more typical "pointwise intervals" contain xx\% of the posterior for a single parameter, joint/curvewise intervals contain xx\% of the posterior distribution for \strong{all} parameters. } \details{ Applied to model predictions, pointwise intervals contain xx\% of the predicted response values \strong{conditional} on specific predictor values. In contrast, curvewise intervals contain xx\% of the predicted response values across all predictor values. Put another way, curvewise intervals contain xx\% of the full \strong{prediction lines} from the model. For more details, see the \href{https://mjskay.github.io/ggdist/articles/lineribbon.html#curve-boxplots-aka-lineribbons-with-joint-intervals-or-curvewise-intervals-}{\emph{ggdist} documentation on curvewise intervals}. } \examples{ \donttest{ library(bayestestR) if (require("ggplot2") && require("rstanarm") && require("ggdist")) { # Generate data ============================================= k = 11 # number of curves (iterations) n = 201 # number of rows data <- data.frame(x = seq(-15,15,length.out = n)) # Simulate iterations as new columns for(i in 1:k) { data[paste0("iter_", i)] <- dnorm(data$x, seq(-5,5, length.out = k)[i], 3) } # Note: first, we need to transpose the data to have iters as rows iters <- datawizard::data_transpose(data[paste0("iter_", 1:k)]) # Compute Median data$Median <- point_estimate(iters)[["Median"]] # Compute Credible Intervals ================================ # Compute ETI (default type of CI) data[c("ETI_low", "ETI_high")] <- eti(iters, ci = 0.5)[c("CI_low", "CI_high")] # Compute CWI # ggdist::curve_interval(reshape_iterations(data), iter_value, .width = c(.5)) # Visualization ============================================= ggplot(data, aes(x = x, y = Median)) + geom_ribbon(aes(ymin = ETI_low, ymax = ETI_high), fill = "red", alpha = 0.3) + geom_line(size = 1) + geom_line(data = reshape_iterations(data), aes(y = iter_value, group = iter_group), alpha = 0.3) } } } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()} } \concept{ci} bayestestR/man/convert_bayesian_as_frequentist.Rd0000644000175000017500000000255414057264527022255 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_bayesian_to_frequentist.R \name{convert_bayesian_as_frequentist} \alias{convert_bayesian_as_frequentist} \alias{bayesian_as_frequentist} \title{Convert (refit) a Bayesian model to frequentist} \usage{ convert_bayesian_as_frequentist(model, data = NULL) bayesian_as_frequentist(model, data = NULL) } \arguments{ \item{model}{A Bayesian model.} \item{data}{Data used by the model. If \code{NULL}, will try to extract it from the model.} } \description{ Refit a Bayesian model as frequentist. Can be useful for comparisons.
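The model is refit using its frequentist counterpart (e.g., a \code{stan_glm()} model would be refit with \code{glm()}), so the prior information is ignored.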
} \examples{ \donttest{ # Rstanarm ---------------------- if (require("rstanarm")) { # Simple regressions model <- stan_glm(Sepal.Length ~ Species, data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) } } \dontrun{ if (require("rstanarm")) { model <- stan_glm(vs ~ mpg, family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) # Mixed models model <- stan_glmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) model <- stan_glmer(vs ~ mpg + (1 | cyl), family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) } } } bayestestR/man/eti.Rd0000644000175000017500000001546414111635557014371 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eti.R \name{eti} \alias{eti} \alias{eti.numeric} \alias{eti.data.frame} \alias{eti.MCMCglmm} \alias{eti.sim.merMod} \alias{eti.sim} \alias{eti.emmGrid} \alias{eti.stanreg} \alias{eti.brmsfit} \alias{eti.BFBayesFactor} \title{Equal-Tailed Interval (ETI)} \usage{ eti(x, ...) \method{eti}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{eti}{data.frame}(x, ci = 0.95, verbose = TRUE, ...) \method{eti}{MCMCglmm}(x, ci = 0.95, verbose = TRUE, ...) \method{eti}{sim.merMod}( x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{sim}(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) \method{eti}{emmGrid}(x, ci = 0.95, verbose = TRUE, ...) \method{eti}{stanreg}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{brmsfit}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{BFBayesFactor}(x, ci = 0.95, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters.
} } \description{ Compute the \strong{Equal-Tailed Interval (ETI)} of posterior distributions using the quantiles method. The probability of being below this interval is equal to the probability of being above it. The ETI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{eti()}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. \cr \cr The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). \cr The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\cite{Kruschke, 2014}). An effective sample size of at least 10,000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\cite{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4,000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\cite{McElreath, 2015}). \cr However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. \cr \cr A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. \cr \cr This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. \cr \cr On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, from a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI.
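A rough sketch of this difference, using a skewed (log-normal) distribution (the exact values are illustrative only): \preformatted{x <- rnorm(10000)
posterior <- exp(x) # a skewed, log-normal distribution

# ETI limits of the transformed draws match the transformed ETI limits:
eti(posterior)
exp(c(eti(x)$CI_low, eti(x)$CI_high))

# ...whereas the HDI limits generally do not:
hdi(posterior)
exp(c(hdi(x)$CI_low, hdi(x)$CI_high))}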
} \examples{ library(bayestestR) posterior <- rnorm(1000) eti(posterior) eti(posterior, ci = c(.80, .89, .95)) df <- data.frame(replicate(4, rnorm(100))) eti(df) eti(df, ci = c(.80, .89, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) eti(model) eti(model, ci = c(.80, .89, .95)) library(emmeans) eti(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) eti(model) eti(model, ci = c(.80, .89, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) eti(bf) eti(bf, ci = c(.80, .89, .95)) } } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{cwi}()}, \code{\link{hdi}()}, \code{\link{si}()} } \concept{ci} bayestestR/man/bayesfactor_models.Rd0000644000175000017500000001606114101116425017443 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_models.R \name{bayesfactor_models} \alias{bayesfactor_models} \alias{bf_models} \alias{update.bayesfactor_models} \alias{as.matrix.bayesfactor_models} \title{Bayes Factors (BF) for model comparison} \usage{ bayesfactor_models(..., denominator = 1, verbose = TRUE) bf_models(..., denominator = 1, verbose = TRUE) \method{update}{bayesfactor_models}(object, subset = NULL, reference = NULL, ...) \method{as.matrix}{bayesfactor_models}(x, ...) } \arguments{ \item{...}{Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object (see 'Details'). Ignored in \code{as.matrix()}, \code{update()}.} \item{denominator}{Either an integer indicating which of the models to use as the denominator, or a model to be used as a denominator. Ignored for \code{BFBayesFactor}.} \item{verbose}{Toggle off warnings.} \item{object, x}{A \code{\link[=bayesfactor_models]{bayesfactor_models()}} object.} \item{subset}{Vector of model indices to keep or remove.} \item{reference}{Index of model to reference to, or \code{"top"} to reference to the best model, or \code{"bottom"} to reference to the worst model.} } \value{ A data frame containing the models' formulas (reconstructed fixed and random effects) and their \code{log(BF)}s, that prints nicely. } \description{ This function computes or extracts Bayes factors from fitted models. \cr \cr The \verb{bf_*} function is an alias of the main function. } \details{ If the passed models are supported by \pkg{insight}, the DV of all models will be tested for equality (else this is assumed to be true), and the models' terms will be extracted (allowing for follow-up analysis with \code{bayesfactor_inclusion}). \itemize{ \item For \code{brmsfit} or \code{stanreg} models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. \itemize{ \item \code{brmsfit} models must have been fitted with \code{save_pars = save_pars(all = TRUE)}. \item \code{stanreg} models must have been fitted with a defined \code{diagnostic_file}. } \item For \code{BFBayesFactor}, \code{bayesfactor_models()} is mostly a wrapper around \code{BayesFactor::extractBF()}. \item BIC approximations are used to compute Bayes factors for all other model types (with a BIC method). \itemize{ \item \strong{Note} that BICs are extracted from models as-is. So if, for example, you want to compare mixed models based on ML instead of REML, you must supply models fit with ML. } } In order to correctly and precisely estimate Bayes factors, a rule of thumb is the 4 P's: \strong{P}roper \strong{P}riors and \strong{P}lentiful \strong{P}osteriors. How many?
The number of posterior samples needed for testing is substantially larger than for estimation (the default of 4000 samples may not be enough in many cases). A conservative rule of thumb is to obtain 10 times more samples than would be required for estimation (\cite{Gronau, Singmann, & Wagenmakers, 2017}). If fewer than 40,000 samples are detected, \code{bayesfactor_models()} gives a warning. \cr \cr See also \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null; one convention is that a Bayes factor greater than 3 can be considered "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). } \examples{ # With lm objects: # ---------------- lm1 <- lm(Sepal.Length ~ 1, data = iris) lm2 <- lm(Sepal.Length ~ Species, data = iris) lm3 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) lm4 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1) bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result BFM <- bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result update(BFM, reference = "bottom") as.matrix(BFM) \dontrun{ # With lmerMod objects: # --------------------- if (require("lme4")) { lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) lmer3 <- lmer( Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), data = iris ) bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) bayesfactor_models(lmer1, lmer2, lmer3, denominator = lmer1) } # rstanarm models # --------------------- # (note that a unique diagnostic_file MUST be specified in order to work) if (require("rstanarm")) { stan_m0 <- stan_glm(Sepal.Length ~ 1, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_m1 <- stan_glm(Sepal.Length ~ Species, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df1.csv") ) stan_m2 <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df2.csv") ) bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0) } # brms models # -------------------- # (note the save_pars MUST be set to save_pars(all = TRUE) in order to work) if (require("brms")) { brm1 <- brm(Sepal.Length ~ 1, data = iris, save_pars = save_pars(all = TRUE)) brm2 <- brm(Sepal.Length ~ Species, data = iris, save_pars = save_pars(all = TRUE)) brm3 <- brm( Sepal.Length ~ Species + Petal.Length, data = iris, save_pars = save_pars(all = TRUE) ) bayesfactor_models(brm1, brm2, brm3, denominator = 1) } # BayesFactor # --------------------------- if (require("BayesFactor")) { data(puzzles) BF <- anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID", progress = FALSE ) BF bayesfactor_models(BF) # basically the same } } } \references{ \itemize{ \item Gronau, Q. F., Singmann, H., & Wagenmakers, E. J. (2017). Bridgesampling: An R package for estimating normalizing constants. arXiv preprint arXiv:1710.08162. \item Kass, R. E., and Raftery, A. E.
(1995). Bayes Factors. Journal of the American Statistical Association, 90(430), 773-795. \item Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, 72, 33–37. \item Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. Ben-Shachar } bayestestR/man/reexports.Rd0000644000175000017500000000113014076521542015637 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_html.R, R/print_md.R, R/reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{print_html} \alias{print_md} \alias{reshape_ci} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{datawizard}{\code{\link[datawizard]{reshape_ci}}} \item{insight}{\code{\link[insight:display]{print_html}}, \code{\link[insight:display]{print_md}}} }} bayestestR/man/as.data.frame.density.Rd0000644000175000017500000000061613636776614017703 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{as.data.frame.density} \alias{as.data.frame.density} \title{Coerce to a Data Frame} \usage{ \method{as.data.frame}{density}(x, ...) } \arguments{ \item{x}{any \R object.} \item{...}{additional arguments to be passed to or from methods.} } \description{ Coerce to a Data Frame } bayestestR/man/describe_prior.Rd0000644000175000017500000000161113616544116016574 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_prior.R \name{describe_prior} \alias{describe_prior} \title{Describe Priors} \usage{ describe_prior(model, ...) } \arguments{ \item{model}{A Bayesian model.} \item{...}{Currently not used.} } \description{ Returns a summary of the priors used in the model. } \examples{ \dontrun{ library(bayestestR) # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) describe_prior(model) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) describe_prior(model) } # BayesFactor objects # ----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) describe_prior(bf) } } } bayestestR/man/simulate_prior.Rd0000644000175000017500000000150614101116425016636 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_priors.R \name{simulate_prior} \alias{simulate_prior} \title{Returns Priors of a Model as Empirical Distributions} \usage{ simulate_prior(model, n = 1000, ...) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{n}{Size of the simulated prior distributions.} \item{...}{Currently not used.} } \description{ Transforms prior information into empirical distributions.
} \examples{ \dontrun{ library(bayestestR) if (require("rstanarm")) { model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) simulate_prior(model) } } } \seealso{ \code{\link[=unupdate]{unupdate()}} for directly sampling from the prior distribution (useful for complex priors and designs). } bayestestR/man/equivalence_test.Rd0000644000175000017500000002040214101116425017134 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test} \alias{equivalence_test} \alias{equivalence_test.default} \alias{equivalence_test.numeric} \alias{equivalence_test.data.frame} \alias{equivalence_test.emmGrid} \alias{equivalence_test.BFBayesFactor} \alias{equivalence_test.stanreg} \alias{equivalence_test.brmsfit} \title{Test for Practical Equivalence} \usage{ equivalence_test(x, ...) \method{equivalence_test}{default}(x, ...) \method{equivalence_test}{numeric}(x, range = "default", ci = 0.95, verbose = TRUE, ...) \method{equivalence_test}{data.frame}(x, range = "default", ci = 0.95, verbose = TRUE, ...) \method{equivalence_test}{emmGrid}(x, range = "default", ci = 0.95, verbose = TRUE, ...) \method{equivalence_test}{BFBayesFactor}(x, range = "default", ci = 0.95, verbose = TRUE, ...) \method{equivalence_test}{stanreg}( x, range = "default", ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{equivalence_test}{brmsfit}( x, range = "default", ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or depending on the number of outcome variables a vector or a list. In models with one response, \code{range} should be a vector of length two (e.g., \code{c(-0.1, 0.1)}). In multivariate models, \code{range} should be a list with a numeric vector for each response variable. Vector names should correspond to the name of the response variables. If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the HDI.
\item \code{ROPE_low}, \code{ROPE_high} The limits of the ROPE. These values are identical for all parameters. \item \code{ROPE_Percentage} The proportion of the HDI that lies inside the ROPE. \item \code{ROPE_Equivalence} The "test result", as character. Either "rejected", "accepted" or "undecided". \item \code{HDI_low}, \code{HDI_high} The lower and upper HDI limits for the parameters. } } \description{ Perform a \strong{Test for Practical Equivalence} for Bayesian and frequentist models. } \details{ Documentation is accessible for: \itemize{ \item \href{https://easystats.github.io/bayestestR/reference/equivalence_test.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/equivalence_test.lm.html}{Frequentist models} } For Bayesian models, the \strong{Test for Practical Equivalence} is based on the \emph{"HDI+ROPE decision rule"} (\cite{Kruschke, 2014, 2018}) to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (i.e., a ROPE). In other words, it checks the percentage of the \verb{89\%} \link[=hdi]{HDI} that lies within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. Using the \link[=rope]{ROPE} and the \link[=hdi]{HDI}, \cite{Kruschke (2018)} suggests using the percentage of the \verb{95\%} (or \verb{89\%}, considered more stable) HDI that falls within the ROPE as a decision rule. If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, i.e., all of the most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s undecided whether to accept or reject the null hypothesis. If the full ROPE is used (i.e., \verb{100\%} of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than \verb{2.5\%} or greater than \verb{97.5\%}. Desirable results are low proportions inside the ROPE (the closer to zero the better). \cr \cr Some attention is required for finding suitable values for the ROPE limits (argument \code{range}). See 'Details' in \code{\link[=rope_range]{rope_range()}} for further information. \cr \cr \strong{Multicollinearity: Non-independent covariates} \cr \cr When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. In such cases, the test for practical equivalence may have inappropriate results. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are the results of the "undecided" parameters, which may either move further towards "rejection" or away from it (\cite{Kruschke 2014, 340f}). \cr \cr \code{equivalence_test()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (\cite{Piironen and Vehtari 2017}).
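A minimal sketch of such a check (assuming a fitted \code{rstanarm} or \code{brms} model named \code{model}): \preformatted{draws <- as.data.frame(model)
round(cor(draws), 2) # pairwise correlations of the posterior draws
pairs(draws)         # pair plots of the joint posterior}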
} \note{ There is a \code{print()}-method with a \code{digits}-argument to control the amount of digits in the output, and there is a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} to visualize the results from the equivalence-test (for models only). } \examples{ library(bayestestR) equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) # print more digits test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) print(test, digits = 4) \dontrun{ library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) equivalence_test(model, ci = c(.50, 1)) # plot result test <- equivalence_test(model) plot(test) library(emmeans) equivalence_test(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) equivalence_test(model, ci = c(.50, .99)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) equivalence_test(bf) equivalence_test(bf, ci = c(.50, .99)) } } \references{ \itemize{ \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304} \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. \doi{10.1007/s11222-016-9649-y} } } bayestestR/man/bayesfactor.Rd0000644000175000017500000000643114101116425016074 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor.R \name{bayesfactor} \alias{bayesfactor} \title{Bayes Factors (BF)} \usage{ bayesfactor( ..., prior = NULL, direction = "two-sided", null = 0, hypothesis = NULL, effects = c("fixed", "random", "all"), verbose = TRUE, denominator = 1, match_models = FALSE, prior_odds = NULL ) } \arguments{ \item{...}{A numeric vector, model object(s), or the output from \code{bayesfactor_models}.} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for an interval-null).} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{verbose}{Toggle off warnings.} \item{denominator}{Either an integer indicating which of the models to use as the denominator, or a model to be used as a denominator. Ignored for \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \verb{BayesFactor::priorOdds<-}.} } \value{ Some type of Bayes factor, depending on the input.
See \code{\link[=bayesfactor_parameters]{bayesfactor_parameters()}}, \code{\link[=bayesfactor_models]{bayesfactor_models()}} or \code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}} } \description{ This function computes the Bayes factors (BFs) that are appropriate to the input. For vectors or single models, it will compute \code{\link[=bayesfactor_parameters]{BFs for single parameters}}, or, if \code{hypothesis} is specified, \code{\link[=bayesfactor_restricted]{BFs for restricted models}}. For multiple models, it will return the BF corresponding to the \code{\link[=bayesfactor_models]{comparison between models}}, and if a model comparison is passed, it will compute the \code{\link[=bayesfactor_inclusion]{inclusion BF}}. \cr\cr For a complete overview of these functions, read the \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factor vignette}. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) if (require("logspline")) { prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) bayesfactor(posterior, prior = prior) } \dontrun{ # rstanarm models # --------------- if (require("rstanarm")) { model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) bayesfactor(model) } } if (require("logspline")) { # Frequentist models # --------------- m0 <- lm(extra ~ 1, data = sleep) m1 <- lm(extra ~ group, data = sleep) m2 <- lm(extra ~ group + ID, data = sleep) comparison <- bayesfactor(m0, m1, m2) comparison bayesfactor(comparison) } } bayestestR/man/dot-extract_priors_rstanarm.Rd0000644000175000017500000000057013636776614021353 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.extract_priors_rstanarm} \alias{.extract_priors_rstanarm} \title{Extract and Return the priors formatted for rstanarm} \usage{ .extract_priors_rstanarm(model, ...) } \description{ Extract and Return the priors formatted for rstanarm } \keyword{internal} bayestestR/man/pd_to_p.Rd0000644000175000017500000000170114101116425015223 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_pd_to_p.R \name{pd_to_p} \alias{pd_to_p} \alias{p_to_pd} \alias{convert_p_to_pd} \alias{convert_pd_to_p} \title{Convert between Probability of Direction (pd) and p-value.} \usage{ pd_to_p(pd, direction = "two-sided", ...) p_to_pd(p, direction = "two-sided", ...) convert_p_to_pd(p, direction = "two-sided", ...) convert_pd_to_p(pd, direction = "two-sided", ...) } \arguments{ \item{pd}{A Probability of Direction (pd) value (between 0 and 1).} \item{direction}{What type of p-value is requested or provided. Can be \code{"two-sided"} (default, two tailed) or \code{"one-sided"} (one tailed).} \item{...}{Arguments passed to or from other methods.} \item{p}{A p-value.} } \description{ Enables a conversion between Probability of Direction (pd) and p-value.
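For a two-sided p-value, the conversion is \code{p = 2 * (1 - pd)}; for a one-sided p-value, it is \code{p = 1 - pd} (so, e.g., \code{pd = 0.95} corresponds to a two-sided p-value of \code{0.10}).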
} \examples{ pd_to_p(pd = 0.95) pd_to_p(pd = 0.95, direction = "one-sided") } bayestestR/man/effective_sample.Rd0000644000175000017500000000543414101116425017075 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/effective_sample.R \name{effective_sample} \alias{effective_sample} \alias{effective_sample.brmsfit} \alias{effective_sample.stanreg} \alias{effective_sample.blavaan} \alias{effective_sample.MCMCglmm} \title{Effective Sample Size (ESS)} \usage{ effective_sample(model, ...) \method{effective_sample}{brmsfit}( model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{effective_sample}{stanreg}( model, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{effective_sample}{blavaan}(model, parameters = NULL, ...) \method{effective_sample}{MCMCglmm}( model, effects = c("fixed", "random", "all"), parameters = NULL, ... ) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with two columns: Parameter name and effective sample size (ESS). } \description{ This function returns the effective sample size (ESS). } \details{ \strong{Effective Sample Size (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p182-3}). } \examples{ \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) effective_sample(model) } } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28 } } bayestestR/man/mcse.Rd0000644000175000017500000000362414101116425014532 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mcse.R \name{mcse} \alias{mcse} \alias{mcse.stanreg} \title{Monte-Carlo Standard Error (MCSE)} \usage{ mcse(model, ...) \method{mcse}{stanreg}( model, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...
) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ This function returns the Monte Carlo Standard Error (MCSE). } \details{ \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. } \examples{ \dontrun{ library(bayestestR) library(rstanarm) model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) mcse(model) } } \references{ Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } bayestestR/man/bayesfactor_inclusion.Rd0000644000175000017500000000736014101116375020165 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_inclusion.R \name{bayesfactor_inclusion} \alias{bayesfactor_inclusion} \alias{bf_inclusion} \title{Inclusion Bayes Factors for testing predictors across Bayesian models} \usage{ bayesfactor_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) bf_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) } \arguments{ \item{models}{An object of class \code{\link[=bayesfactor_models]{bayesfactor_models()}} or \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \verb{BayesFactor::priorOdds<-}.} \item{...}{Arguments passed to or from other methods.} } \value{ a data frame containing the prior and posterior probabilities, and log(BF) for each effect. } \description{ The \verb{bf_*} function is an alias of the main function. \cr \cr For more info, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \details{ Inclusion Bayes factors answer the question: Are the observed data more probable under models with a particular effect than they are under models without that particular effect? In other words, on average - are models with effect \eqn{X} more likely to have produced the observed data than models without effect \eqn{X}? \subsection{Match Models}{ If \code{match_models=FALSE} (default), Inclusion BFs are computed by comparing all models with a term against all models without that term. If \code{TRUE}, comparison is restricted to models that (1) do not include any interactions with the term of interest; (2) for interaction terms, averaging is done only across models that contain the main effect terms from which the interaction term is comprised. } } \note{ Random effects in the \code{lmer} style are converted to interaction terms: i.e., \code{(X|G)} will become the terms \code{1:G} and \code{X:G}.
} \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, where one convention is that a Bayes factor greater than 3 can be considered "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null model) (\cite{Wetzels et al. 2011}). } \examples{ library(bayestestR) # Using bayesfactor_models: # ------------------------------ mo0 <- lm(Sepal.Length ~ 1, data = iris) mo1 <- lm(Sepal.Length ~ Species, data = iris) mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0) bayesfactor_inclusion(BFmodels) \dontrun{ # BayesFactor # ------------------------------- library(BayesFactor) BF <- generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF) # compare only matched models: bayesfactor_inclusion(BF, match_models = TRUE) } } \references{ \itemize{ \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80-101. \item Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP \href{https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp}{Blog post}. } } \seealso{ \code{\link[=weighted_posteriors]{weighted_posteriors()}} for Bayesian parameter averaging. } \author{ Mattan S. Ben-Shachar } bayestestR/man/p_rope.Rd0000644000175000017500000000556614101116425015066 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_rope.R \name{p_rope} \alias{p_rope} \alias{p_rope.default} \alias{p_rope.numeric} \alias{p_rope.data.frame} \alias{p_rope.emmGrid} \alias{p_rope.BFBayesFactor} \alias{p_rope.MCMCglmm} \alias{p_rope.stanreg} \alias{p_rope.brmsfit} \title{Probability of being in the ROPE} \usage{ p_rope(x, ...) \method{p_rope}{default}(x, ...) \method{p_rope}{numeric}(x, range = "default", ...) \method{p_rope}{data.frame}(x, range = "default", ...) \method{p_rope}{emmGrid}(x, range = "default", ...) \method{p_rope}{BFBayesFactor}(x, range = "default", ...) \method{p_rope}{MCMCglmm}(x, range = "default", ...) \method{p_rope}{stanreg}( x, range = "default", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{p_rope}{brmsfit}( x, range = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or, depending on the number of outcome variables, a vector or a list. In models with one response, \code{range} should be a vector of length two (e.g., \code{c(-0.1, 0.1)}). In multivariate models, \code{range} should be a list with a numeric vector for each response variable. Vector names should correspond to the names of the response variables.
If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute the proportion of the whole posterior distribution that doesn't lie within a region of practical equivalence (ROPE). It is equivalent to running \code{rope(..., ci = 1)}. } \examples{ library(bayestestR) p_rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) p_rope(x = mtcars, range = c(-0.1, 0.1)) } bayestestR/man/ci.Rd0000644000175000017500000001206014135670552014175 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci.R \name{ci} \alias{ci} \alias{ci.numeric} \alias{ci.data.frame} \alias{ci.sim.merMod} \alias{ci.sim} \alias{ci.stanreg} \alias{ci.brmsfit} \alias{ci.BFBayesFactor} \alias{ci.MCMCglmm} \title{Confidence/Credible/Compatibility Interval (CI)} \usage{ ci(x, ...) \method{ci}{numeric}(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{data.frame}(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{sim.merMod}( x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{ci}{sim}(x, ci = 0.95, method = "ETI", parameters = NULL, verbose = TRUE, ...) \method{ci}{stanreg}( x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, BF = 1, ... ) \method{ci}{brmsfit}( x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ... ) \method{ci}{BFBayesFactor}(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{MCMCglmm}(x, ci = 0.95, method = "ETI", verbose = TRUE, ...) } \arguments{ \item{x}{A \code{stanreg} or \code{brmsfit} model, or a vector representing a posterior distribution.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{method}{Can be \link[=eti]{'ETI'} (default), \link[=hdi]{'HDI'}, \link[=bci]{'BCI'} or \link[=si]{'SI'}.} \item{verbose}{Toggle off warnings.} \item{BF}{The amount of support required to be included in the support interval.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. 
Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals (SI) for Bayesian and frequentist models. The documentation is accessible for: } \details{ \itemize{ \item \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/ci.default.html}{Frequentist models} } } \note{ When it comes to interpretation, we recommend thinking of the CI in terms of an "uncertainty" or "compatibility" interval, the latter being defined as \dQuote{Given any value in the interval and the background assumptions, the data should not seem very surprising} (\cite{Gelman & Greenland 2019}). \cr \cr There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) posterior <- rnorm(1000) ci(posterior, method = "ETI") ci(posterior, method = "HDI") df <- data.frame(replicate(4, rnorm(100))) ci(df, method = "ETI", ci = c(.80, .89, .95)) ci(df, method = "HDI", ci = c(.80, .89, .95)) \dontrun{ if (require("rstanarm")) { model <- stan_glm(mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0) ci(model, method = "ETI", ci = c(.80, .89)) ci(model, method = "HDI", ci = c(.80, .89)) ci(model, method = "SI") } if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) ci(model, method = "ETI") ci(model, method = "HDI") ci(model, method = "SI") } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) ci(bf, method = "ETI") ci(bf, method = "HDI") } if (require("emmeans")) { model <- emtrends(model, ~1, "wt") ci(model, method = "ETI") ci(model, method = "HDI") ci(model, method = "SI") } } } \references{ Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ 2019;l5381. 10.1136/bmj.l5381 } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{cwi}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()} } \concept{ci} bayestestR/man/sensitivity_to_prior.Rd0000644000175000017500000000320713616544116020113 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{sensitivity_to_prior} \alias{sensitivity_to_prior} \title{Sensitivity to Prior} \usage{ sensitivity_to_prior(model, index = "Median", magnitude = 10, ...) } \arguments{ \item{model}{A Bayesian model (\code{stanreg} or \code{brmsfit}).} \item{index}{The indices from which to compute the sensitivity. Can be one or multiple names of the columns returned by \code{describe_posterior}. The case is important here (e.g., write 'Median' instead of 'median').} \item{magnitude}{This represents the magnitude by which to shift the antagonistic prior (to test the sensitivity).
For instance, a magnitude of 10 (default) means that the model will be updated with a prior located at 10 standard deviations from its original location.} \item{...}{Arguments passed to or from other methods.} } \description{ Computes the sensitivity to the prior specification. This represents the proportion of change in some indices when the model is fitted with an antagonistic prior (a prior of the same shape located on the opposite side of the effect). } \examples{ \dontrun{ library(bayestestR) # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) sensitivity_to_prior(model) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) sensitivity_to_prior(model, index = c("Median", "MAP")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) # sensitivity_to_prior(model) } } } \seealso{ DescTools } bayestestR/man/bci.Rd0000644000175000017500000001435414135670552014347 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bci.R \name{bci} \alias{bci} \alias{bcai} \alias{bci.numeric} \alias{bci.data.frame} \alias{bci.MCMCglmm} \alias{bci.sim.merMod} \alias{bci.sim} \alias{bci.emmGrid} \alias{bci.stanreg} \alias{bci.brmsfit} \alias{bci.BFBayesFactor} \title{Bias Corrected and Accelerated Interval (BCa)} \usage{ bci(x, ...) bcai(x, ...) \method{bci}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{bci}{data.frame}(x, ci = 0.95, verbose = TRUE, ...) \method{bci}{MCMCglmm}(x, ci = 0.95, verbose = TRUE, ...) \method{bci}{sim.merMod}( x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{bci}{sim}(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) \method{bci}{emmGrid}(x, ci = 0.95, verbose = TRUE, ...) \method{bci}{stanreg}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{bci}{brmsfit}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) \method{bci}{BFBayesFactor}(x, ci = 0.95, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated.
Only applies to \pkg{brms}-models.} } \value{ A data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Bias Corrected and Accelerated Interval (BCa)} of posterior distributions. } \details{ Unlike equal-tailed intervals (see \code{eti()}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. \cr \cr The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). \cr The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\cite{Kruschke, 2014}). An effective sample size of at least 10,000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\cite{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4,000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\cite{McElreath, 2015}). \cr However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. \cr \cr A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. \cr \cr This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. \cr \cr On the other hand, the ETI is consistent under transformations of the distribution (for instance, from a log-odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. In contrast, applying transformations to the distribution will change the resulting HDI. } \examples{ posterior <- rnorm(1000) bci(posterior) bci(posterior, ci = c(.80, .89, .95)) } \references{ DiCiccio, T. J., and
Efron, B. (1996). Bootstrap confidence intervals. Statistical Science, 11(3), 189-212. \doi{10.1214/ss/1032280214} } \seealso{ Other ci: \code{\link{ci}()}, \code{\link{cwi}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()} } \concept{ci} bayestestR/man/bayesfactor_restricted.Rd0000644000175000017500000001664114101116425020330 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_restricted.R \name{bayesfactor_restricted} \alias{bayesfactor_restricted} \alias{bf_restricted} \alias{bayesfactor_restricted.stanreg} \alias{bayesfactor_restricted.brmsfit} \alias{bayesfactor_restricted.blavaan} \alias{bayesfactor_restricted.emmGrid} \title{Bayes Factors (BF) for Order Restricted Models} \usage{ bayesfactor_restricted( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) bf_restricted(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) \method{bayesfactor_restricted}{stanreg}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ... ) \method{bayesfactor_restricted}{brmsfit}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ... ) \method{bayesfactor_restricted}{blavaan}( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) \method{bayesfactor_restricted}{emmGrid}( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) } \arguments{ \item{posterior}{A \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing posterior distribution(s) (see Details).} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{prior}{An object representing a prior distribution (see Details).} \item{verbose}{Toggle off warnings.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame containing the (log) Bayes factor representing evidence \emph{against} the un-restricted model. } \description{ This method computes Bayes factors for comparing a model with order restrictions on its parameters against the fully unrestricted model. \emph{Note that this method should only be used for confirmatory analyses}. \cr \cr The \verb{bf_*} function is an alias of the main function. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors for order-restricted models vs un-restricted models by setting an order restriction on the prior and posterior distributions (\cite{Morey & Wagenmakers, 2013}). \cr\cr (Though it is possible to use \code{bayesfactor_restricted()} to test interval restrictions, it is more suitable for testing order restrictions; see examples and the sketch below).
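\cr\cr As a minimal sketch of such an interval restriction (using simulated draws; the column name \code{X} here is arbitrary):
\preformatted{
prior <- data.frame(X = rnorm(1e4))
posterior <- data.frame(X = rnorm(1e4, mean = 0.5, sd = 0.3))
# restrict X to a practically-null interval
bayesfactor_restricted(posterior, prior = prior,
                       hypothesis = "(X > -0.1) & (X < 0.1)")
}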
} \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); as the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results. \itemize{ \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. See \code{\link[=unupdate]{unupdate()}}. \item \strong{Note:} When \code{posterior} is a \code{brmsfit_multiple} model, \code{prior} \strong{must} be provided. } \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of prior samples \emph{only}. \item \strong{Note:} When the \code{emmGrid} has undergone any transformations (\code{"log"}, \code{"response"}, etc.), or \code{regrid}ing, then \code{prior} must be an \code{emmGrid} object, as stated above. } } } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, where one convention is that a Bayes factor greater than 3 can be considered "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null model) (\cite{Wetzels et al. 2011}).
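\cr\cr As a rough illustration of this convention (a hypothetical base-R helper, not part of \pkg{bayestestR}):
\preformatted{
bf <- c(0.2, 0.8, 5)
cut(bf, breaks = c(0, 1/3, 3, Inf),
    labels = c("substantial (for null)", "anecdotal", "substantial (against null)"))
}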
} \examples{ library(bayestestR) prior <- data.frame( X = rnorm(100), X1 = rnorm(100), X3 = rnorm(100) ) posterior <- data.frame( X = rnorm(100, .4), X1 = rnorm(100, -.2), X3 = rnorm(100) ) hyps <- c( "X > X1 & X1 > X3", "X > X1" ) bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) \dontrun{ # rstanarm models # --------------- if (require("rstanarm") && require("emmeans")) { fit_stan <- stan_glm(mpg ~ wt + cyl + am, data = mtcars, refresh = 0 ) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) bayesfactor_restricted(fit_stan, hypothesis = hyps) # emmGrid objects # --------------- # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html disgust_data <- read.table(url("http://www.learnbayes.org/disgust_example.txt"), header = TRUE) contrasts(disgust_data$condition) <- contr.orthonorm # see vignette fit_model <- stan_glm(score ~ condition, data = disgust_data, family = gaussian()) em_condition <- emmeans(fit_model, ~condition) hyps <- c("lemon < control & control < sulfur") bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) # > # Bayes Factor (Order-Restriction) # > # > Hypothesis P(Prior) P(Posterior) BF # > lemon < control & control < sulfur 0.17 0.75 4.49 # > --- # > Bayes factors for the restricted model vs. the un-restricted model. } } } \references{ \itemize{ \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological Methods, 16(4), 406. \item Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. Retrieved from https://richarddmorey.org/category/order-restrictions/. } } bayestestR/man/p_map.Rd0000644000175000017500000001042714101116425014670 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_map.R \name{p_map} \alias{p_map} \alias{p_pointnull} \alias{p_map.stanreg} \alias{p_map.brmsfit} \title{Bayesian p-value based on the density at the Maximum A Posteriori (MAP)} \usage{ p_map(x, precision = 2^10, method = "kernel", ...) p_pointnull(x, precision = 2^10, method = "kernel", ...) \method{p_map}{stanreg}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{p_map}{brmsfit}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated.
Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute a Bayesian equivalent of the \emph{p}-value, related to the odds that a parameter (described by its posterior distribution) has against the null hypothesis (\emph{h0}) using Mills' (2014, 2017) \emph{Objective Bayesian Hypothesis Testing} framework. It corresponds to the density value at 0 divided by the density at the Maximum A Posteriori (MAP). } \details{ Note that this method is sensitive to the density estimation \code{method} (see the section in the examples below). \subsection{Strengths and Limitations}{ \strong{Strengths:} Straightforward computation. Objective property of the posterior distribution. \cr \cr \strong{Limitations:} Limited information favoring the null hypothesis. Relies on density approximation. Indirect relationship between mathematical definition and interpretation. Only suitable for weak / very diffuse priors. } } \examples{ library(bayestestR) p_map(rnorm(1000, 0, 1)) p_map(rnorm(1000, 10, 1)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) p_map(model) library(emmeans) p_map(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_map(model) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) p_map(bf) # --------------------------------------- # Robustness to density estimation method set.seed(333) data <- data.frame() for (iteration in 1:250) { x <- rnorm(1000, 1, 1) result <- data.frame( "Kernel" = p_map(x, method = "kernel"), "KernSmooth" = p_map(x, method = "KernSmooth"), "logspline" = p_map(x, method = "logspline") ) data <- rbind(data, result) } data$KernSmooth <- data$Kernel - data$KernSmooth data$logspline <- data$Kernel - data$logspline summary(data$KernSmooth) summary(data$logspline) boxplot(data[c("KernSmooth", "logspline")]) } } \references{ \itemize{ \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} \item Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. } } \seealso{ \href{https://www.youtube.com/watch?v=Ip8Ci5KUVRc}{Jeff Mill's talk} } bayestestR/man/rope.Rd0000644000175000017500000002076614101116425014546 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rope.R \name{rope} \alias{rope} \alias{rope.default} \alias{rope.numeric} \alias{rope.data.frame} \alias{rope.emmGrid} \alias{rope.BFBayesFactor} \alias{rope.MCMCglmm} \alias{rope.stanreg} \alias{rope.brmsfit} \title{Region of Practical Equivalence (ROPE)} \usage{ rope(x, ...) \method{rope}{default}(x, ...) \method{rope}{numeric}(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{data.frame}(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{emmGrid}(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{BFBayesFactor}(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...)
\method{rope}{MCMCglmm}(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{stanreg}( x, range = "default", ci = 0.95, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{rope}{brmsfit}( x, range = "default", ci = 0.95, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or, depending on the number of outcome variables, a vector or a list. In models with one response, \code{range} should be a vector of length two (e.g., \code{c(-0.1, 0.1)}). In multivariate models, \code{range} should be a list with a numeric vector for each response variable. Vector names should correspond to the names of the response variables. If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{ci_method}{The type of interval to use to quantify the percentage in ROPE. Can be 'HDI' (default) or 'ETI'. See \code{\link[=ci]{ci()}}.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute the proportion of the HDI (by default, the \verb{95\%} HDI) of a posterior distribution that lies within a region of practical equivalence. } \details{ \subsection{ROPE}{ Statistically, the probability of a posterior distribution of being different from 0 does not make much sense (the probability of a single value null hypothesis in a continuous distribution is 0). Therefore, the idea underlying ROPE is to let the user define an area around the null value enclosing values that are \emph{equivalent to the null} value for practical purposes (\cite{Kruschke 2010, 2011, 2014}). \cr \cr Kruschke (2018) suggests that such a null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: For instance, for linear models, the ROPE could be set as \verb{0 +/- .1 * sd(y)}. This ROPE range can be automatically computed for models using the \link{rope_range} function.
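\cr \cr For instance, a minimal sketch with simulated posterior draws (for supported models, \code{\link[=rope_range]{rope_range()}} derives such a range automatically):
\preformatted{
posterior <- rnorm(1000, mean = 0.3, sd = 0.2)
rope(posterior, range = c(-0.1, 0.1)) # share of the 95\% HDI inside the ROPE
}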
\cr \cr Kruschke (2010, 2011, 2014) suggests using the proportion of the \verb{95\%} (or \verb{89\%}, considered more stable) \link[=hdi]{HDI} that falls within the ROPE as an index for "null-hypothesis" testing (as understood under the Bayesian framework, see \code{\link[=equivalence_test]{equivalence_test()}}). } \subsection{Sensitivity to parameter's scale}{ It is important to consider the unit (i.e., the scale) of the predictors when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, the percentage in ROPE depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity to a coefficient depends on the scale of the coefficient itself. } \subsection{Multicollinearity: Non-independent covariates}{ When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In the case of collinearity, the (joint) distributions of these parameters may yield either an increased or a decreased percentage in the ROPE, which means that inferences based on \code{rope()} are inappropriate (\cite{Kruschke 2014, 340f}). \cr \cr \code{rope()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (\cite{Piironen and Vehtari 2017}). } \subsection{Strengths and Limitations}{ \strong{Strengths:} Provides information related to the practical relevance of the effects. \cr \cr \strong{Limitations:} A ROPE range needs to be arbitrarily defined. Sensitive to the scale (the unit) of the predictors. Not sensitive to highly significant effects. } } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 1), ci = c(.90, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) rope(model) rope(model, ci = c(.90, .95)) library(emmeans) rope(emtrends(model, ~1, "wt"), ci = c(.90, .95)) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) rope(model) rope(model, ci = c(.90, .95)) library(brms) model <- brms::brm(brms::mvbind(mpg, disp) ~ wt + cyl, data = mtcars) rope(model) rope(model, ci = c(.90, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) rope(bf) rope(bf, ci = c(.90, .95)) } } \references{ \itemize{ \item Cohen, J. (1988). Statistical power analysis for the behavioural sciences. \item Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. \item Kruschke, J. K. (2011). Bayesian assessment of null values via parameter estimation and model comparison.
Perspectives on Psychological Science, 6(3), 299-312. \doi{10.1177/1745691611406925}. \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. \doi{10.1007/s11222-016-9649-y} } } bayestestR/man/si.Rd0000644000175000017500000001766014111635557014230 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/si.R \name{si} \alias{si} \alias{si.numeric} \alias{si.stanreg} \alias{si.brmsfit} \alias{si.blavaan} \alias{si.emmGrid} \alias{si.data.frame} \title{Compute Support Intervals} \usage{ si(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{numeric}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{stanreg}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "zi", "zero_inflated", "all", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{si}{brmsfit}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "zi", "zero_inflated", "all", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{si}{blavaan}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "zi", "zero_inflated", "all", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{si}{emmGrid}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{data.frame}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) } \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing posterior distribution(s) (see 'Details').} \item{prior}{An object representing a prior distribution (see 'Details').} \item{BF}{The amount of support required to be included in the support interval.} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to and from other methods. (Can be used to pass arguments to internal \code{\link[logspline:logspline]{logspline::logspline()}}.)} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame containing the lower and upper bounds of the SI.
\cr Note that if the level of requested support is higher than observed in the data, the interval will be \verb{[NA,NA]}. } \description{ A support interval contains only the values of the parameter that predict the observed data better than average, by some degree \emph{k}; these are values of the parameter that are associated with an updating factor greater than or equal to \emph{k}. From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller than \emph{1/k}. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute support intervals based on prior and posterior distributions. For the computation of support intervals, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative} - note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below). \subsection{Choosing a value of \code{BF}}{ The choice of \code{BF} (the level of support) depends on what we want our interval to represent: \itemize{ \item A \code{BF} = 1 contains values whose credibility is not decreased by observing the data. \item A \code{BF} > 1 contains values that received more impressive support from the data. \item A \code{BF} < 1 contains values whose credibility has \emph{not} been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than 1/\code{BF} in support of the alternative. E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null will be larger than 3. } } } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); as the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results. \itemize{ \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. See \code{\link[=unupdate]{unupdate()}}. \item \strong{Note:} When \code{posterior} is a \code{brmsfit_multiple} model, \code{prior} \strong{must} be provided.
} \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of prior samples \emph{only}. \item \strong{Note:} When the \code{emmGrid} has undergone any transformations (\code{"log"}, \code{"response"}, etc.), or \code{regrid}ing, then \code{prior} must be an \code{emmGrid} object, as stated above. } } } \examples{ library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) si(posterior, prior) \dontrun{ # rstanarm models # --------------- library(rstanarm) contrasts(sleep$group) <- contr.orthonorm # see vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) si(stan_model) si(stan_model, BF = 3) # emmGrid objects # --------------- library(emmeans) group_diff <- pairs(emmeans(stan_model, ~group)) si(group_diff, prior = stan_model) # brms models # ----------- library(brms) contrasts(sleep$group) <- contr.orthonorm # see vignette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors ) si(brms_model) } } \references{ Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). The Support Interval. \doi{10.31234/osf.io/zwnxb} } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{cwi}()}, \code{\link{eti}()}, \code{\link{hdi}()} } \concept{ci} bayestestR/man/sexit_thresholds.Rd0000644000175000017500000000272013745661547017205 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sexit_thresholds.R \name{sexit_thresholds} \alias{sexit_thresholds} \title{Find Effect Size Thresholds} \usage{ sexit_thresholds(x, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} } \description{ This function attempts to automatically find suitable default values for a "significant" (i.e., non-negligible) and a "large" effect. This is to be used with care, and the chosen threshold should always be explicitly reported and justified. See the detail section in \code{\link[=sexit]{sexit()}} for more information. } \examples{ sexit_thresholds(rnorm(1000)) \dontrun{ if (require("rstanarm")) { model <- stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 ) sexit_thresholds(model) model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) sexit_thresholds(model) } if (require("brms")) { model <- brm(mpg ~ wt + cyl, data = mtcars) sexit_thresholds(model) } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) sexit_thresholds(bf) } } } \references{ Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}.
} bayestestR/man/contr.orthonorm.Rd0000644000175000017500000000607414125235747016763 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/contr.orthonorm.R \name{contr.orthonorm} \alias{contr.orthonorm} \alias{contr.bayes} \title{Orthonormal Contrast Matrices for Bayesian Estimation} \usage{ contr.orthonorm(n, contrasts = TRUE, sparse = FALSE) } \arguments{ \item{n}{a vector of levels for a factor, or the number of levels.} \item{contrasts}{a logical indicating whether contrasts should be computed.} \item{sparse}{logical indicating if the result should be sparse (of class \code{\link[Matrix:dgCMatrix-class]{dgCMatrix}}), using package \href{https://CRAN.R-project.org/package=Matrix}{\pkg{Matrix}}.} } \value{ A \code{matrix} with n rows and k columns, with \code{k = n - 1} if \code{contrasts} is \code{TRUE} and \code{k = n} if \code{contrasts} is \code{FALSE}. } \description{ Returns a design or model matrix of orthonormal contrasts such that the marginal prior on all effects is identical (see 'Details'). Implementation from Singmann & Gronau's \href{https://github.com/bayesstuff/bfrms/}{\code{bfrms}}, following the description in Rouder, Morey, Speckman, & Province (2012, p. 363). \cr\cr Though using this factor coding scheme might obscure the interpretation of parameters, it is essential for correct estimation of Bayes factors for contrasts and order restrictions of multi-level factors (where \code{k>2}). See info on specifying correct priors for factors with more than 2 levels in \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \details{ When \code{contrasts = FALSE}, the returned contrasts are equivalent to \code{contr.treatment(, contrasts = FALSE)}, as suggested by McElreath (also known as one-hot encoding). \subsection{Setting Priors}{ It is recommended to set 0-centered, identically scaled priors on the dummy-coded variables produced by this method. These priors then represent the distance the mean of one of the levels might have from the overall mean. \subsection{Contrasts}{ This method guarantees that any set of contrasts between the \emph{k} groups will have the same multivariate prior regardless of level order; however, different contrasts within a set of contrasts can have different univariate prior shapes/scales. \cr\cr For example the contrasts \code{A - B} will have the same prior as \code{B - C}, as will \code{(A + C) - B} and \code{(B + A) - C}, but \code{A - B} and \code{(A + C) - B} will differ. } } } \examples{ contr.orthonorm(2) # Q_2 in Rouder et al. (2012, p. 363) contr.orthonorm(5) # equivalent to Q_5 in Rouder et al. (2012, p. 363) ## check decomposition Q3 <- contr.orthonorm(3) Q3 \%*\% t(Q3) ## 2/3 on diagonal and -1/3 on off-diagonal elements } \references{ \itemize{ \item McElreath, R. (2020). Statistical rethinking: A Bayesian course with examples in R and Stan. CRC press. \item Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). Default Bayes factors for ANOVA designs. \emph{Journal of Mathematical Psychology}, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 } } bayestestR/man/area_under_curve.Rd0000644000175000017500000000277313571067531017124 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_under_curve.R \name{area_under_curve} \alias{area_under_curve} \alias{auc} \title{Area under the Curve (AUC)} \usage{ area_under_curve(x, y, method = c("trapezoid", "step", "spline"), ...)
auc(x, y, method = c("trapezoid", "step", "spline"), ...) } \arguments{ \item{x}{Vector of x values.} \item{y}{Vector of y values.} \item{method}{Method to compute the Area Under the Curve (AUC). Can be \code{"trapezoid"} (default), \code{"step"} or \code{"spline"}. If \code{"trapezoid"}, the curve is formed by connecting all points by a direct line (composite trapezoid rule). If \code{"step"} is chosen, a stepwise connection of two points is used. For calculating the area under a spline interpolation, the \code{splinefun} function is used in combination with \code{integrate}.} \item{...}{Arguments passed to or from other methods.} } \description{ Based on the DescTools \code{AUC} function. It can calculate the area under the curve with a naive algorithm or a more elaborate spline approach. The curve must be given by vectors of xy-coordinates. This function can handle unsorted x values (by sorting x) and ties for the x values (by ignoring duplicates). } \examples{ library(bayestestR) posterior <- distribution_normal(1000) dens <- estimate_density(posterior) dens <- dens[dens$x > 0, ] x <- dens$x y <- dens$y area_under_curve(x, y, method = "trapezoid") area_under_curve(x, y, method = "step") area_under_curve(x, y, method = "spline") } \seealso{ DescTools } bayestestR/man/check_prior.Rd0000644000175000017500000000410414101116425016065 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_prior.R \name{check_prior} \alias{check_prior} \title{Check if Prior is Informative} \usage{ check_prior(model, method = "gelman", simulate_priors = TRUE, ...) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{method}{Can be \code{"gelman"} or \code{"lakeland"}. For the \code{"gelman"} method, if the SD of the posterior is more than 0.1 times the SD of the prior, then the prior is considered as informative. For the \code{"lakeland"} method, the prior is considered as informative if the posterior falls within the \verb{95\%} HDI of the prior.} \item{simulate_priors}{Should prior distributions be simulated using \code{\link[=simulate_prior]{simulate_prior()}} (default; faster) or sampled via \code{\link[=unupdate]{unupdate()}} (slower, more accurate)?} \item{...}{Currently not used.} } \value{ A data frame with two columns: The parameter names and the quality of the prior (which might be \code{"informative"} or \code{"uninformative"}, or \code{"not determinable"} if the prior distribution could not be determined). } \description{ Performs a simple test to check whether the prior is informative, given the posterior. This idea, and the accompanying heuristics, were discussed in \href{https://statmodeling.stat.columbia.edu/2019/08/10/}{this blogpost}.
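\cr\cr As a rough numeric sketch of the \code{"gelman"} heuristic described above (simulated draws; not the function's internal code):
\preformatted{
prior <- rnorm(1e4, mean = 0, sd = 10)       # wide, weakly informative prior
posterior <- rnorm(1e4, mean = 0.5, sd = 0.5)
sd(posterior) > 0.1 * sd(prior) # FALSE here: the prior would be flagged as uninformative
}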
} \examples{ \dontrun{ library(bayestestR) if (require("rstanarm")) { model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") # An extreme example where both methods diverge: model <- stan_glm(mpg ~ wt, data = mtcars[1:3, ], prior = normal(-3.3, 1, FALSE), prior_intercept = normal(0, 1000, FALSE), refresh = 0 ) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") plot(si(model)) # can provide visual confirmation to the Lakeland method } } } \references{ https://statmodeling.stat.columbia.edu/2019/08/10/ } bayestestR/man/p_significance.Rd0000644000175000017500000001003114101116425016532 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_significance.R \name{p_significance} \alias{p_significance} \alias{p_significance.numeric} \alias{p_significance.emmGrid} \alias{p_significance.stanreg} \alias{p_significance.brmsfit} \title{Practical Significance (ps)} \usage{ p_significance(x, ...) \method{p_significance}{numeric}(x, threshold = "default", ...) \method{p_significance}{emmGrid}(x, threshold = "default", ...) \method{p_significance}{stanreg}( x, threshold = "default", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{p_significance}{brmsfit}( x, threshold = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{threshold}{The threshold value that separates significant from negligible effect. If \code{"default"}, the range is set to \code{0.1} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{verbose}{Toggle off warnings.} } \value{ Values between 0 and 1 corresponding to the probability of practical significance (ps). } \description{ Compute the probability of \strong{Practical Significance} (\emph{\strong{ps}}), which can be conceptualized as a unidirectional equivalence test. It returns the probability that an effect is above a given threshold corresponding to a negligible effect in the median's direction. Mathematically, it is defined as the proportion of the posterior distribution that shares the median's sign and lies beyond the threshold. } \details{ \code{p_significance()} returns the proportion of a probability distribution (\code{x}) that is outside a certain range (the negligible effect, or ROPE, see argument \code{threshold}).
If there are values of the distribution both below and above the ROPE, \code{p_significance()} returns the higher probability of a value being outside the ROPE. Typically, this value should be larger than 0.5 to indicate practical significance. However, if the range of the negligible effect is rather large compared to the range of the probability distribution \code{x}, \code{p_significance()} will be less than 0.5, which indicates no clear practical significance. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_significance(posterior) # Simulate a dataframe of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_significance(df) \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_significance(model) } } } bayestestR/man/model_to_priors.Rd0000644000175000017500000000205014106656140016773 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_to_priors.R \name{model_to_priors} \alias{model_to_priors} \title{Convert model's posteriors to priors (EXPERIMENTAL)} \usage{ model_to_priors(model, scale_multiply = 3, ...) } \arguments{ \item{model}{A Bayesian model.} \item{scale_multiply}{The SD of the posterior will be multiplied by this amount before being set as a prior to avoid overly narrow priors.} \item{...}{Other arguments for \code{insight::get_prior()} or \code{\link{describe_posterior}}.} } \description{ Convert model's posteriors to (normal) priors. } \examples{ \dontrun{ # brms models # ----------------------------------------------- if (require("brms")) { formula <- brms::brmsformula(mpg ~ wt + cyl, center = FALSE) model <- brms::brm(formula, data = mtcars, refresh = 0) priors <- model_to_priors(model) priors <- brms::validate_prior(priors, formula, data = mtcars) priors model2 <- brms::brm(formula, data = mtcars, prior = priors, refresh = 0) } } } bayestestR/man/bic_to_bf.Rd0000644000175000017500000000164314057264527015521 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bic_to_bf.R \name{bic_to_bf} \alias{bic_to_bf} \title{Convert BIC indices to Bayes Factors via the BIC-approximation method.} \usage{ bic_to_bf(bic, denominator, log = FALSE) } \arguments{ \item{bic}{A vector of BIC values.} \item{denominator}{The BIC value to use as a denominator (to test against).} \item{log}{Return the \code{log(BF)}?} } \value{ The Bayes Factors corresponding to the BIC values against the denominator. } \description{ Convert BIC indices to Bayes Factors via the BIC-approximation method. 
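The conversion rests on the standard BIC approximation to the Bayes factor (cf. Wagenmakers, 2007); as a sketch of the underlying relation (the subscripts are descriptive labels, not code): \deqn{BF \approx \exp((BIC_{denominator} - BIC_{model})/2)} Models with a lower BIC than the denominator thus obtain Bayes factors greater than 1 (evidence in their favor); with \code{log = TRUE}, the logarithm of this quantity is returned instead.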
} \examples{ bic1 <- BIC(lm(Sepal.Length ~ 1, data = iris)) bic2 <- BIC(lm(Sepal.Length ~ Species, data = iris)) bic3 <- BIC(lm(Sepal.Length ~ Species + Petal.Length, data = iris)) bic4 <- BIC(lm(Sepal.Length ~ Species * Petal.Length, data = iris)) bic_to_bf(c(bic1, bic2, bic3, bic4), denominator = bic1) } bayestestR/man/sexit.Rd0000644000175000017500000001763114101116425014732 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sexit.R \name{sexit} \alias{sexit} \title{Sequential Effect eXistence and sIgnificance Testing (SEXIT)} \usage{ sexit(x, significant = "default", large = "default", ci = 0.95, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit} or \code{BayesFactor}).} \item{significant, large}{The threshold values to use for significant and large probabilities. If left to 'default', will be selected through \code{\link[=sexit_thresholds]{sexit_thresholds()}}. See the details section below.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{...}{Currently not used.} } \value{ A data frame, with the textual description as an attribute. } \description{ The SEXIT is a new framework to describe Bayesian effects, guiding which indices to use. Accordingly, the \code{sexit()} function returns the minimal (and optimal) required information to describe models' parameters under a Bayesian framework. It includes the following indices: \itemize{ \item{Centrality: the median of the posterior distribution. In probabilistic terms, there is a \verb{50\%} probability that the true effect is higher, and a \verb{50\%} probability that it is lower. See \code{\link[=point_estimate]{point_estimate()}}.} \item{Uncertainty: the \verb{95\%} Highest Density Interval (HDI). In probabilistic terms, there is a \verb{95\%} probability that the effect lies within this credible interval. See \code{\link[=ci]{ci()}}.} \item{Existence: The probability of direction makes it possible to quantify the certainty by which an effect is positive or negative. It is a critical index to show that an effect of some manipulation is not harmful (for instance in clinical studies) or to assess the direction of a link. See \code{\link[=p_direction]{p_direction()}}.} \item{Significance: Once existence is demonstrated with high certainty, we can assess whether the effect is of sufficient size to be considered significant (i.e., not negligible). This is a useful index to determine which effects are actually important and worthy of discussion in a given process. See \code{\link[=p_significance]{p_significance()}}.} \item{Size: Finally, this index gives an idea about the strength of an effect. However, beware, as studies have shown that a big effect size can also be suggestive of low statistical power (see details section).} } } \details{ \subsection{Rationale}{ The assessment of "significance" (in its broadest meaning) is a pervasive issue in science, and its historical index, the p-value, has been strongly criticized and deemed to have played an important role in the replicability crisis. In reaction, more and more scientists have turned to Bayesian methods, offering an alternative set of tools to answer their questions. However, the Bayesian framework offers a wide variety of possible indices related to "significance", and the debate has been raging about which index is the best, and which one to report.
This situation can lead to the mindless reporting of all possible indices (in the hope that the reader will be satisfied), often without the writer fully understanding or interpreting them. It is indeed difficult to juggle many indices with complicated definitions and subtle differences. SEXIT aims at offering a practical framework for Bayesian effects reporting, in which the focus is put on intuitiveness, explicitness and usefulness of the indices' interpretation. To that end, we suggest a system of description of parameters that would be intuitive, easy to learn and apply, mathematically accurate and useful for decision-making. Once the threshold for significance (i.e., the ROPE) and the one for a "large" effect are explicitly defined, the SEXIT framework does not make any interpretation, i.e., it does not label the effects, but just sequentially gives three probabilities (of direction, of significance and of being large, respectively) as-is on top of the characteristics of the posterior (using the median and HDI for centrality and uncertainty description). Thus, it provides a lot of information about the posterior distribution (through the mass of different 'sections' of the posterior) in a clear and meaningful way. } \subsection{Threshold selection}{ One of the most important things about the SEXIT framework is that it relies on two "arbitrary" thresholds (i.e., that have no absolute meaning). They are the ones related to effect size (an inherently subjective notion), namely the thresholds for significant and large effects. They are set, by default, to \code{0.05} and \code{0.3} of the standard deviation of the outcome variable (tiny and large effect sizes for correlations according to Funder \& Ozer, 2019). However, these defaults were chosen for lack of a better option, and might not be suitable for your case. Thus, they are to be handled with care, and the chosen thresholds should always be explicitly reported and justified. \itemize{ \item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{0.05 * SD<sub>y</sub>}}{\eqn{[0.05*SD_{y}]}} and \ifelse{html}{\out{0.3 * SD<sub>y</sub>}}{\eqn{[0.3*SD_{y}]}} for significant and large effects, respectively. \item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{&pi;/&radic;(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in thresholds of \code{0.09} and \code{0.54}. \item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the thresholds. Currently, the same default is applied as for logistic models. \item For models from \strong{count data}, the residual variance is used. This is a rather experimental threshold and is probably often similar to \code{0.05} and \code{0.3}, but should be used with care! \item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above). \item For \strong{correlations}, \code{0.05} and \code{0.3} are used. \item For all other models, \code{0.05} and \code{0.3} are used, but it is strongly advised to specify them manually. } } \subsection{Examples}{ The three values for existence, significance and size provide a useful description of the posterior distribution of the effects. Some possible scenarios include: \itemize{ \item{The probability of existence is low, but the probability of being large is high: it suggests that the posterior is very wide (covering large territories on both sides of 0).
The statistical power might be too low, which should caution against any confident conclusion.} \item{The probability of existence and significance is high, but the probability of being large is very small: it suggests that the effect is, with high confidence, not large (the posterior is mostly contained between the significance and the large thresholds).} \item{The three indices are very low: this suggests that the effect is null with high confidence (the posterior is closely centred around 0).}}} } \examples{ \dontrun{ library(bayestestR) s <- sexit(rnorm(1000, -1, 1)) s print(s, summary = TRUE) s <- sexit(iris) s print(s, summary = TRUE) if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt * cyl, data = mtcars, iter = 400, refresh = 0 ) s <- sexit(model) s print(s, summary = TRUE) } } } \references{ \itemize{ \item{Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. \doi{10.21105/joss.01541}} \item{Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767}} } } bayestestR/man/reshape_iterations.Rd0000644000175000017500000000261314101116425017460 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_iterations.R \name{reshape_iterations} \alias{reshape_iterations} \alias{reshape_draws} \title{Reshape estimations with multiple iterations (draws) to long format} \usage{ reshape_iterations(x, prefix = c("draw", "iter", "iteration", "sim")) reshape_draws(x, prefix = c("draw", "iter", "iteration", "sim")) } \arguments{ \item{x}{A data.frame containing posterior draws obtained from \code{estimate_response} or \code{estimate_link}.} \item{prefix}{The prefix of the draws (for instance, \code{"iter_"} for columns named as \verb{iter_1, iter_2, iter_3}). If more than one is provided, will search for the first one that matches.} } \value{ Data frame of reshaped draws in long format. } \description{ Reshape a wide data.frame of iterations (such as posterior draws or bootstrapped samples) stored as columns into a long format. Instead of having all iterations as columns (e.g., \verb{iter_1, iter_2, ...}), will return three columns with the \verb{\\*_index} (the previous index of the row), the \verb{\\*_group} (the iteration number) and the \verb{\\*_value} (the value of said iteration). } \examples{ \donttest{ if (require("rstanarm")) { model <- stan_glm(mpg ~ am, data = mtcars, refresh = 0) draws <- insight::get_predicted(model) long_format <- reshape_iterations(draws) head(long_format) } } } bayestestR/man/mediation.Rd0000644000175000017500000001373014101116425015543 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mediation.R \name{mediation} \alias{mediation} \alias{mediation.brmsfit} \alias{mediation.stanmvreg} \title{Summary of Bayesian multivariate-response mediation-models} \usage{ mediation(model, ...) \method{mediation}{brmsfit}( model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ... ) \method{mediation}{stanmvreg}( model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ...
) } \arguments{ \item{model}{A \code{brmsfit} or \code{stanmvreg} object.} \item{...}{Not used.} \item{treatment}{Character, name of the treatment variable (or direct effect) in a (multivariate response) mediator-model. If missing, \code{mediation()} tries to find the treatment variable automatically, however, this may fail.} \item{mediator}{Character, name of the mediator variable in a (multivariate response) mediator-model. If missing, \code{mediation()} tries to find the mediator variable automatically, however, this may fail.} \item{response}{A named character vector, indicating the names of the response variables to be used for the mediation analysis. Usually can be \code{NULL}, in which case these variables are retrieved automatically. If not \code{NULL}, names should match the names of the model formulas, \code{names(insight::find_response(model, combine = TRUE))}. This can be useful if, for instance, the mediator variable used as predictor has a different name from the mediator variable used as response. This might occur when the mediator is transformed in one model, but used "as is" as response variable in the other model. Example: The mediator \code{m} is used as response variable, but the centered version \code{m_center} is used as mediator variable. The second response variable (for the treatment model, with the mediator as additional predictor), \code{y}, is not transformed. Then we could use \code{response} like this: \code{mediation(model, response = c(m = "m_center", y = "y"))}.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{method}{Can be \link[=eti]{'ETI'} (default), \link[=hdi]{'HDI'}, \link[=bci]{'BCI'} or \link[=si]{'SI'}.} } \value{ A data frame with direct, indirect, mediator and total effect of a multivariate-response mediation-model, as well as the proportion mediated. The effect sizes are median values of the posterior samples (use \code{centrality} for other centrality indices). } \description{ \code{mediation()} is a short summary for multivariate-response mediation-models, i.e., this function computes average direct and average causal mediation effects of multivariate response models. } \details{ \code{mediation()} returns a data frame with information on the \emph{direct effect} (mean value of posterior samples from \code{treatment} of the outcome model), \emph{mediator effect} (mean value of posterior samples from \code{mediator} of the outcome model), \emph{indirect effect} (mean value of the multiplication of the posterior samples from \code{mediator} of the outcome model and the posterior samples from \code{treatment} of the mediation model) and the total effect (mean value of sums of posterior samples used for the direct and indirect effect). The \emph{proportion mediated} is the indirect effect divided by the total effect. \cr \cr For all values, the \verb{95\%} credible intervals are calculated by default. Use \code{ci} to calculate a different interval. \cr \cr The arguments \code{treatment} and \code{mediator} do not necessarily need to be specified. If missing, \code{mediation()} tries to find the treatment and mediator variable automatically. If this does not work, specify these variables.
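\cr \cr As a minimal sketch of how these quantities relate (using hypothetical vectors of posterior draws: \code{d_treat_y} and \code{d_med_y} for the treatment and mediator coefficients of the outcome model, and \code{d_treat_m} for the treatment coefficient of the mediator model): \preformatted{direct <- mean(d_treat_y)
indirect <- mean(d_treat_m * d_med_y) # treatment -> mediator -> outcome
total <- mean(d_treat_y + d_treat_m * d_med_y)
prop_mediated <- indirect / total}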
\cr \cr The direct effect is also called \emph{average direct effect} (ADE), the indirect effect is also called \emph{average causal mediation effects} (ACME). See also \cite{Tingley et al. 2014} and \cite{Imai et al. 2010}. } \note{ There is an \code{as.data.frame()} method that returns the posterior samples of the effects, which can be used for further processing with the different functions of the \pkg{bayestestR} package. } \examples{ \dontrun{ library(mediation) library(brms) library(rstanarm) # load sample data data(jobs) set.seed(123) # linear models, for mediation analysis b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) # mediation analysis, for comparison with Stan models m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek") # Fit Bayesian mediation model in brms f1 <- bf(job_seek ~ treat + econ_hard + sex + age) f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age) m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4, refresh = 0) # Fit Bayesian mediation model in rstanarm m3 <- stan_mvmer( list( job_seek ~ treat + econ_hard + sex + age + (1 | occp), depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp) ), data = jobs, cores = 4, refresh = 0 ) summary(m1) mediation(m2, centrality = "mean", ci = .95) mediation(m3, centrality = "mean", ci = .95) } } \references{ \itemize{ \item Imai, K., Keele, L. and Tingley, D. (2010) A General Approach to Causal Mediation Analysis, Psychological Methods, Vol. 15, No. 4 (December), pp. 309-334. \item Tingley, D., Yamamoto, T., Hirose, K., Imai, K. and Keele, L. (2014). mediation: R package for Causal Mediation Analysis, Journal of Statistical Software, Vol. 59, No. 5, pp. 1-38. } } \seealso{ The \pkg{mediation} package for a causal mediation analysis in the frequentist framework. } bayestestR/man/point_estimate.Rd0000644000175000017500000001054714101116425016621 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/point_estimate.R \name{point_estimate} \alias{point_estimate} \alias{point_estimate.numeric} \alias{point_estimate.stanreg} \alias{point_estimate.brmsfit} \alias{point_estimate.BFBayesFactor} \title{Point-estimates of posterior distributions} \usage{ point_estimate(x, centrality = "all", dispersion = FALSE, ...) \method{point_estimate}{numeric}(x, centrality = "all", dispersion = FALSE, threshold = 0.1, ...) \method{point_estimate}{stanreg}( x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{point_estimate}{brmsfit}( x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{point_estimate}{BFBayesFactor}(x, centrality = "all", dispersion = FALSE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{centrality}{The point-estimates (centrality indices) to compute.
Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{...}{Additional arguments to be passed to or from methods.} \item{threshold}{For \code{centrality = "trimmed"} (i.e. trimmed mean), indicates the fraction (0 to 0.5) of observations to be trimmed from each end of the vector before the mean is computed.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) point_estimate(rnorm(1000)) point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) point_estimate(rnorm(1000), centrality = c("median", "MAP")) df <- data.frame(replicate(4, rnorm(100))) point_estimate(df, centrality = "all", dispersion = TRUE) point_estimate(df, centrality = c("median", "MAP")) \dontrun{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # emmeans estimates # ----------------------------------------------- library(emmeans) point_estimate(emtrends(model, ~1, "wt"), centrality = c("median", "MAP")) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # BayesFactor objects # ----------------------------------------------- library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) point_estimate(bf, centrality = "all", dispersion = TRUE) point_estimate(bf, centrality = c("median", "MAP")) } } \references{ \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Vignette In-Depth 1: Comparison of Point-Estimates} } bayestestR/man/diagnostic_draws.Rd0000644000175000017500000000137714101116425017122 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostic_draws.R \name{diagnostic_draws} \alias{diagnostic_draws} \title{Diagnostic values for each iteration} \usage{ diagnostic_draws(posteriors, ...) 
} \arguments{ \item{posteriors}{A \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} } \description{ Returns, for each iteration, diagnostic values such as the accumulated log-posterior, the average Metropolis acceptance rate, the number of divergent transitions, and whether the transition hit the maximum tree depth (i.e., was capped rather than terminating its evolution normally). } \examples{ \dontrun{ set.seed(333) if (require("brms", quietly = TRUE)) { model <- brm(mpg ~ wt * cyl * vs, data = mtcars, iter = 100, control = list(adapt_delta = 0.80), refresh = 0 ) diagnostic_draws(model) } } } bayestestR/man/map_estimate.Rd0000644000175000017500000000717714101116425016252 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R \name{map_estimate} \alias{map_estimate} \alias{map_estimate.numeric} \alias{map_estimate.bayesQR} \alias{map_estimate.stanreg} \alias{map_estimate.brmsfit} \alias{map_estimate.data.frame} \alias{map_estimate.emmGrid} \title{Maximum A Posteriori probability estimate (MAP)} \usage{ map_estimate(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{numeric}(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{bayesQR}(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{stanreg}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{map_estimate}{brmsfit}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{map_estimate}{data.frame}(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{emmGrid}(x, precision = 2^10, method = "kernel", ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A numeric value if \code{posterior} is a vector. If \code{posterior} is a model-object, returns a data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{MAP_Estimate} The MAP estimate for the posterior or each model parameter. } } \description{ Find the \strong{Highest Maximum A Posteriori probability estimate (MAP)} of a posterior, i.e., the value associated with the highest probability density (the "peak" of the posterior distribution).
In other words, it is an estimation of the \emph{mode} for continuous parameters. Note that this function relies on \link{estimate_density}, which by default uses a different smoothing bandwidth (\code{"SJ"}) compared to the legacy default implemented in the base R \link{density} function (\code{"nrd0"}). } \examples{ \dontrun{ library(bayestestR) posterior <- rnorm(10000) map_estimate(posterior) plot(density(posterior)) abline(v = map_estimate(posterior), col = "red") library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) } } bayestestR/man/weighted_posteriors.Rd0000644000175000017500000001444614101116425017670 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/weighted_posteriors.R \name{weighted_posteriors} \alias{weighted_posteriors} \alias{weighted_posteriors.data.frame} \alias{weighted_posteriors.stanreg} \alias{weighted_posteriors.brmsfit} \alias{weighted_posteriors.blavaan} \alias{weighted_posteriors.BFBayesFactor} \title{Generate posterior distributions weighted across models} \usage{ weighted_posteriors(..., prior_odds = NULL, missing = 0, verbose = TRUE) \method{weighted_posteriors}{data.frame}(..., prior_odds = NULL, missing = 0, verbose = TRUE) \method{weighted_posteriors}{stanreg}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL ) \method{weighted_posteriors}{brmsfit}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL ) \method{weighted_posteriors}{blavaan}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL ) \method{weighted_posteriors}{BFBayesFactor}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, iterations = 4000 ) } \arguments{ \item{...}{Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object.} \item{prior_odds}{Optional vector of prior odds for the models compared to the first model (or the denominator, for \code{BFBayesFactor} objects). For \code{data.frame}s, this will be used as the basis of weighting.} \item{missing}{An optional numeric value to use if a model does not contain a parameter that appears in other models. Defaults to 0.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{iterations}{For \code{BayesFactor} models, how many posterior samples to draw.} } \value{ A data frame with posterior distributions (weighted across models). } \description{ Extract posterior samples of parameters, weighted across models.
Weighting is done by comparing posterior model probabilities, via \code{\link[=bayesfactor_models]{bayesfactor_models()}}. } \details{ Note that across models some parameters might play different roles. For example, the parameter \code{A} plays a different role in the model \code{Y ~ A + B} (where it is a main effect) than it does in the model \code{Y ~ A + B + A:B} (where it is a simple effect). In many cases centering of predictors (mean subtracting for continuous variables, and effects coding via \code{contr.sum} or orthonormal coding via \code{\link[=contr.orthonorm]{contr.orthonorm()}} for factors) can reduce this issue. In any case, you should be mindful of this issue. \cr\cr See \code{\link[=bayesfactor_models]{bayesfactor_models()}} details for more info on passed models. \cr\cr Note that for \code{BayesFactor} models, posterior samples cannot be generated from intercept-only models. \cr\cr This function is similar to \code{brms::posterior_average}. } \note{ For \verb{BayesFactor < 0.9.12-4.3}, in some instances there might be some problems of duplicate columns of random effects in the resulting data frame. } \examples{ \donttest{ if (require("rstanarm") && require("see")) { stan_m0 <- stan_glm(extra ~ 1, data = sleep, family = gaussian(), refresh = 0, diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_m1 <- stan_glm(extra ~ group, data = sleep, family = gaussian(), refresh = 0, diagnostic_file = file.path(tempdir(), "df1.csv") ) res <- weighted_posteriors(stan_m0, stan_m1) plot(eti(res)) } ## With BayesFactor if (require("BayesFactor")) { extra_sleep <- ttestBF(formula = extra ~ group, data = sleep) wp <- weighted_posteriors(extra_sleep) describe_posterior(extra_sleep, test = NULL) describe_posterior(wp$delta, test = NULL) # also considers the null } ## weighted prediction distributions via data.frames if (require("rstanarm")) { m0 <- stan_glm( mpg ~ 1, data = mtcars, family = gaussian(), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0 ) m1 <- stan_glm( mpg ~ carb, data = mtcars, family = gaussian(), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0 ) # Predictions: pred_m0 <- data.frame(posterior_predict(m0)) pred_m1 <- data.frame(posterior_predict(m1)) BFmods <- bayesfactor_models(m0, m1) wp <- weighted_posteriors(pred_m0, pred_m1, prior_odds = BFmods$BF[2] ) # look at first 5 prediction intervals hdi(pred_m0[1:5]) hdi(pred_m1[1:5]) hdi(wp[1:5]) # between, but closer to pred_m1 } } } \references{ \itemize{ \item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via orthogonalized model mixing. Journal of the American Statistical Association, 91(435), 1197-1208. \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors. Psychonomic bulletin & review, 25(1), 102-113. \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. (2019). A cautionary note on estimating effect size. } } \seealso{ \code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}} for Bayesian model averaging.
} bayestestR/man/describe_posterior.Rd0000644000175000017500000002131114101116425017462 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_posterior.R \name{describe_posterior} \alias{describe_posterior} \alias{describe_posterior.numeric} \alias{describe_posterior.stanreg} \alias{describe_posterior.stanmvreg} \alias{describe_posterior.brmsfit} \alias{describe_posterior.MCMCglmm} \alias{describe_posterior.BFBayesFactor} \title{Describe Posterior Distributions} \usage{ describe_posterior( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, ... ) \method{describe_posterior}{numeric}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, ... ) \method{describe_posterior}{stanreg}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, BF = 1, ... ) \method{describe_posterior}{stanmvreg}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = "p_direction", rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ... ) \method{describe_posterior}{brmsfit}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all", "location", "distributional", "auxiliary"), parameters = NULL, BF = 1, priors = FALSE, ... ) \method{describe_posterior}{MCMCglmm}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, diagnostic = "ESS", parameters = NULL, ... ) \method{describe_posterior}{BFBayesFactor}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope", "bf"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, priors = TRUE, verbose = TRUE, ... ) } \arguments{ \item{posteriors}{A vector, data frame or model of posterior draws.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval.
Can be \code{"HDI"} (default, see \code{\link[=hdi]{hdi()}}), \code{"ETI"} (see \code{\link[=eti]{eti()}}), \code{"BCI"} (see \code{\link[=bci]{bci()}}) or \code{"SI"} (see \code{\link[=si]{si()}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[=rope]{rope()}} or \code{\link[=p_direction]{p_direction()}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a list of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{keep_iterations}{If \code{TRUE}, will keep all iterations (draws) of bootstrapped or Bayesian models. They will be added as additional columns named \verb{iter_1, iter_2, ...}. You can reshape them to a long format by running \code{\link[=reshape_iterations]{reshape_iterations()}}.} \item{...}{Additional arguments to be passed to or from methods.} \item{bf_prior}{Distribution representing a prior for the computation of Bayes factors / SI. Used if the input is a posterior, otherwise (in the case of models) ignored.} \item{BF}{The amount of support required to be included in the support interval.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{priors}{Add the prior used for each parameter.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{verbose}{Toggle off warnings.} } \description{ Compute indices relevant to describe and characterize the posterior distributions. } \details{ One or more components of point estimates (like posterior mean or median), intervals and tests can be omitted from the summary output by setting the related argument to \code{NULL}. For example, \code{test = NULL} and \code{centrality = NULL} would only return the HDI (or CI). 
} \examples{ library(bayestestR) if (require("logspline")) { x <- rnorm(1000) describe_posterior(x) describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(x, ci = c(0.80, 0.90)) df <- data.frame(replicate(4, rnorm(100))) describe_posterior(df) describe_posterior(df, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(df, ci = c(0.80, 0.90)) df <- data.frame(replicate(4, rnorm(20))) head(reshape_iterations(describe_posterior(df, keep_iterations = TRUE))) } \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm") && require("emmeans")) { model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) describe_posterior(model) describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(model, ci = c(0.80, 0.90)) # emmeans estimates # ----------------------------------------------- describe_posterior(emtrends(model, ~1, "wt")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) describe_posterior(model) describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(model, ci = c(0.80, 0.90)) } # BayesFactor objects # ----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) describe_posterior(bf) describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(bf, ci = c(0.80, 0.90)) } } } \references{ \itemize{ \item \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Comparison of Point-Estimates} \item \href{https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html}{Region of Practical Equivalence (ROPE)} \item \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factors} } } bayestestR/man/hdi.Rd0000644000175000017500000001710414111635557014352 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdi.R \name{hdi} \alias{hdi} \alias{hdi.numeric} \alias{hdi.data.frame} \alias{hdi.MCMCglmm} \alias{hdi.sim.merMod} \alias{hdi.sim} \alias{hdi.emmGrid} \alias{hdi.stanreg} \alias{hdi.brmsfit} \alias{hdi.BFBayesFactor} \title{Highest Density Interval (HDI)} \usage{ hdi(x, ...) \method{hdi}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{hdi}{data.frame}(x, ci = 0.95, verbose = TRUE, ...) \method{hdi}{MCMCglmm}(x, ci = 0.95, verbose = TRUE, ...) \method{hdi}{sim.merMod}( x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{sim}(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) \method{hdi}{emmGrid}(x, ci = 0.95, verbose = TRUE, ...) \method{hdi}{stanreg}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{brmsfit}( x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{BFBayesFactor}(x, ci = 0.95, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. 
Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Highest Density Interval (HDI)} of posterior distributions. All points within this interval have a higher probability density than points outside the interval. The HDI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{eti()}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. \cr \cr The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). \cr The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\cite{Kruschke, 2014}). An effective sample size of at least 10,000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\cite{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4,000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\cite{McElreath, 2015}). \cr However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution.
Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. \cr \cr A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. \cr \cr This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. \cr \cr On the other hand, the bounds of the ETI are consistent under transformations applied to the distribution (for instance, when transforming a log-odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) posterior <- rnorm(1000) hdi(posterior, ci = .89) hdi(posterior, ci = c(.80, .90, .95)) df <- data.frame(replicate(4, rnorm(100))) hdi(df) hdi(df, ci = c(.80, .90, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) hdi(model) hdi(model, ci = c(.80, .90, .95)) library(emmeans) hdi(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) hdi(model) hdi(model, ci = c(.80, .90, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) hdi(bf) hdi(bf, ci = c(.80, .90, .95)) } } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item McElreath, R. (2015). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman and Hall/CRC. } } \seealso{ Other interval functions, such as \code{\link[=hdi]{hdi()}}, \code{\link[=eti]{eti()}}, \code{\link[=bci]{bci()}}, \code{\link[=si]{si()}}, \code{\link[=cwi]{cwi()}}. Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{cwi}()}, \code{\link{eti}()}, \code{\link{si}()} } \author{ Credits go to \href{https://rdrr.io/cran/ggdistribute/src/R/stats.R}{ggdistribute} and \href{https://github.com/mikemeredith/HDInterval}{HDInterval}.
} \concept{ci} bayestestR/man/dot-prior_new_location.Rd0000644000175000017500000000051413636776614020276 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.prior_new_location} \alias{.prior_new_location} \title{Set a new location for a prior} \usage{ .prior_new_location(prior, sign, magnitude = 10) } \description{ Set a new location for a prior } \keyword{internal} bayestestR/man/estimate_density.Rd0000644000175000017500000001045414022532547017155 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{estimate_density} \alias{estimate_density} \alias{estimate_density.data.frame} \title{Density Estimation} \usage{ estimate_density( x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ... ) \method{estimate_density}{data.frame}( x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, group_by = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit}, \code{MCMCglmm}, \code{mcmc} or \code{bcplm}) or a \code{BayesFactor} model.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"}, \code{"KernSmooth"} or \code{"mixture"}.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.} \item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.} \item{bw}{See the eponymous argument in \code{density}. Here, the default has been changed to \code{"SJ"}, which is recommended.} \item{...}{Currently not used.} \item{ci}{The confidence interval threshold. Only used when \code{method = "kernel"}.} \item{group_by}{Optional character vector. If not \code{NULL} and \code{x} is a data frame, density estimation is performed for each group (subset) indicated by \code{group_by}.} } \description{ This function is a wrapper over different methods of density estimation. By default, it uses the base R \code{density} function, but with a different smoothing bandwidth (\code{"SJ"}) than the legacy default implemented in base R (\code{"nrd0"}). However, Deng \& Wickham suggest that \code{method = "KernSmooth"} is the fastest and the most accurate. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
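As a minimal base R illustration of the bandwidth point made in the description above (this uses only \code{stats::density()}, nothing specific to \pkg{bayestestR}): \preformatted{set.seed(1)
x <- rnorm(250)
# compare the bandwidths selected by the two rules
c(SJ = density(x, bw = "SJ")$bw, nrd0 = density(x, bw = "nrd0")$bw)
}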
} \examples{ library(bayestestR) set.seed(1) x <- rnorm(250, mean = 1) # Basic usage density_kernel <- estimate_density(x) # default method is "kernel" hist(x, prob = TRUE) lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) lines(density_kernel$x, density_kernel$CI_low, col = "gray", lty = 2) lines(density_kernel$x, density_kernel$CI_high, col = "gray", lty = 2) legend("topright", legend = c("Estimate", "95\% CI"), col = c("black", "gray"), lwd = 2, lty = c(1, 2) ) # Other Methods density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") hist(x, prob = TRUE) lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) # Extension density_extended <- estimate_density(x, extend = TRUE) density_default <- estimate_density(x, extend = FALSE) hist(x, prob = TRUE) lines(density_extended$x, density_extended$y, col = "red", lwd = 3) lines(density_default$x, density_default$y, col = "black", lwd = 3) # Multiple columns df <- data.frame(replicate(4, rnorm(100))) head(estimate_density(df)) # Grouped data estimate_density(iris, group_by = "Species") estimate_density(iris$Petal.Width, group_by = iris$Species) \dontrun{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) head(estimate_density(model)) library(emmeans) head(estimate_density(emtrends(model, ~1, "wt"))) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) estimate_density(model) } } \references{ Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. } bayestestR/man/p_direction.Rd0000644000175000017500000002054614101116425016074 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_direction.R \name{p_direction} \alias{p_direction} \alias{pd} \alias{p_direction.numeric} \alias{p_direction.data.frame} \alias{p_direction.MCMCglmm} \alias{p_direction.emmGrid} \alias{p_direction.stanreg} \alias{p_direction.brmsfit} \alias{p_direction.BFBayesFactor} \title{Probability of Direction (pd)} \usage{ p_direction(x, ...) pd(x, ...) \method{p_direction}{numeric}(x, method = "direct", null = 0, ...) \method{p_direction}{data.frame}(x, method = "direct", null = 0, ...) \method{p_direction}{MCMCglmm}(x, method = "direct", null = 0, ...) \method{p_direction}{emmGrid}(x, method = "direct", null = 0, ...) \method{p_direction}{stanreg}( x, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, method = "direct", null = 0, ... ) \method{p_direction}{brmsfit}( x, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, method = "direct", null = 0, ... ) \method{p_direction}{BFBayesFactor}(x, method = "direct", null = 0, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. 
Can also be a Bayesian model (\code{stanreg}, \code{brmsfit} or \code{BayesFactor}).} \item{...}{Currently not used.} \item{method}{Can be \code{"direct"} or one of the methods of \link[=estimate_density]{density estimation}, such as \code{"kernel"}, \code{"logspline"} or \code{"KernSmooth"}. If \code{"direct"} (default), the computation is based on the raw proportions of samples above and below 0. Else, the result is based on the \link[=auc]{Area under the Curve (AUC)} of the estimated \link[=estimate_density]{density} function.} \item{null}{The value considered as a "null" effect. Traditionally 0, but could also be 1 in the case of ratios.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ Values between 0.5 and 1 corresponding to the probability of direction (pd). \cr\cr Note that in some (rare) cases, especially when used with model averaged posteriors (see \code{\link[=weighted_posteriors]{weighted_posteriors()}} or \code{brms::posterior_average}), \code{pd} can be smaller than \code{0.5}, reflecting high credibility of \code{0}. To detect such cases, the \code{method = "direct"} must be used. } \description{ Compute the \strong{Probability of Direction} (\emph{\strong{pd}}, also known as the Maximum Probability of Effect - \emph{MPE}). It varies between \verb{50\%} and \verb{100\%} (\emph{i.e.}, \code{0.5} and \code{1}) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). It is mathematically defined as the proportion of the posterior distribution that is of the median's sign. Although differently expressed, this index is fairly similar (\emph{i.e.}, is strongly correlated) to the frequentist \strong{p-value}. \cr\cr Note that in some (rare) cases, especially when used with model averaged posteriors (see \code{\link[=weighted_posteriors]{weighted_posteriors()}} or \code{brms::posterior_average}), \code{pd} can be smaller than \code{0.5}, reflecting high credibility of \code{0}. } \details{ \subsection{What is the \emph{pd}?}{ The Probability of Direction (pd) is an index of effect existence, ranging from \verb{50\%} to \verb{100\%}, representing the certainty with which an effect goes in a particular direction (\emph{i.e.}, is positive or negative). Beyond its simplicity of interpretation, understanding and computation, this index also presents other interesting properties: \itemize{ \item It is independent of the model: It is solely based on the posterior distributions and does not require any additional information from the data or the model. \item It is robust to the scale of both the response variable and the predictors. \item It is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers unfamiliar with Bayesian statistics.
} \subsection{Relationship with the p-value}{ In most cases, it seems that the \emph{pd} has a direct correspondence with the frequentist one-sided \emph{p}-value through the formula \ifelse{html}{\out{p<sub>one sided</sub> = 1 - p<sub>d</sub>/100}}{\eqn{p_{one sided}=1-\frac{p_{d}}{100}}} and to the two-sided p-value (the most commonly reported one) through the formula \ifelse{html}{\out{p<sub>two sided</sub> = 2 * (1 - p<sub>d</sub>/100)}}{\eqn{p_{two sided}=2*(1-\frac{p_{d}}{100})}}. Thus, two-sided p-values of \code{.1}, \code{.05}, \code{.01} and \code{.001} correspond approximately to \emph{pd} values of \verb{95\%}, \verb{97.5\%}, \verb{99.5\%} and \verb{99.95\%}, respectively. See also \code{\link[=pd_to_p]{pd_to_p()}}. } \subsection{Methods of computation}{ The simplest and most direct way to compute the \emph{pd} is to 1) look at the sign of the median, 2) select the portion of the posterior with the same sign, and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws. The second approach relies on \link[=estimate_density]{density estimation}: it starts by estimating the density function (for which many methods are available) and then computes the \link[=area_under_curve]{area under the curve} (AUC) of the density on the side of the null value corresponding to the median's sign. } \subsection{Strengths and Limitations}{ \strong{Strengths:} Straightforward computation and interpretation. Objective property of the posterior distribution. 1:1 correspondence with the frequentist p-value. \cr \cr \strong{Limitations:} Provides only limited information in favor of the null hypothesis. } } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_direction(posterior) p_direction(posterior, method = "kernel") # Simulate a data frame of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_direction(df) p_direction(df, method = "kernel") \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_direction(model) p_direction(model, method = "kernel") } # emmeans # ----------------------------------------------- if (require("emmeans")) { p_direction(emtrends(model, ~1, "wt")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_direction(model) p_direction(model, method = "kernel") } # BayesFactor objects # ----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) p_direction(bf) p_direction(bf, method = "kernel") } }
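# Relationship with the p-value (see Details)
# -----------------------------------------------
# Minimal sketches, assuming the pd extracted via as.numeric() is on
# the proportion scale (0.5-1):
posterior <- rnorm(1000, mean = 1, sd = 1)
pd <- as.numeric(p_direction(posterior))
2 * (1 - pd) # approximate two-sided p-value
pd_to_p(pd) # the same conversion via the dedicated helper

# Density-based computation (see Details): area under the estimated
# density curve on the median's side of 0 (here, the positive side)
d <- estimate_density(posterior)
area_under_curve(d$x[d$x > 0], d$y[d$x > 0])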
} \references{ Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} } \seealso{ \code{\link[=pd_to_p]{pd_to_p()}} to convert between Probability of Direction (pd) and p-value. } bayestestR/man/figures/0000755000175000017500000000000014135671003014750 5ustar nileshnileshbayestestR/man/figures/LetsPokeAPizza.jpg0000644000175000017500000035275314133140641020333 0ustar nileshnilesh[binary JPEG image data omitted]
bayestestR/man/figures/watto.jpg0000644000175000017500000020361314133140641016612 0ustar nileshnilesh[binary JPEG image data omitted]
bayestestR/man/figures/logo.png0000644000175000017500000001740514133140641016422 0ustar nileshnilesh[binary PNG image data omitted]
.o} u= pڑ{/N{swC`5]D~eVum?B^;xt`owxke}wʑ#'y՗}4ͷ_qۗyTC`u+?pe9]+g(|}ã7%Ư~2jln~Ϗ>iᓊr~r3G oxO>\Sg?9+_<O 7?oſl7h~W^a`=2󵟼9 +ہ6{0x=xwJ#wMm쀹[Y!d8Pvk_?wsg/^ /EټռJۦ5PvN$UgJ4k^kW~xg _G}<#E)°\U#7^7-ȑ GpdX1(B@_Gm_ x楫R=OXDXg 1&@rq +ۯ/.O^M6w%մ 1;\eMd+Xhsq5;8~T_v]CX'ͼRķlLI:rQHPA#lbi9}W^CLׇBkZC7?4 kc8%cGg|# 6/4˶NHӼ7Hͧ#AOTw|؍ǀeg_+/?(?f[W>(Ԯ'Ymh^Tw;r $zȻ]?ڟǂPuY Vw\.qPD\Lk_T&~>wkωÿُOzN߈^ UpӿW7YgTc-o?}CͼH[idfT(قr@3O1u? .kռ3 ?\VSo'$H!8`8 Kޛ}5S$.Qq\JGHʰRA/x'aSB_47 ]yk/{I4q:09A9ny/_F_ڧ"Y>ᡑ5,Ieu$`\_=.weYᯇ$V됥zykhwH&8T1+#bˬvg̱E O<ǥ?ŸiEΫn.χq]\ܬ6>daH8K_u/ZIy=΢.bFͺU\1 A~g?_!m{ng +I_RRE1jw%\ S Oc}*Ckč MpP̓ H{|#E-v{\8a{Aُ*?LL|.uh|=Xi%ۼѢ@ؕxymO_)#⯉u ]ͳ[}3Z$R<#mۆ@$_DmUUC ?kT$[yZzSπV \Pc1\)rr( {E~~ܟػ?G|7Mq[6Ew3>#gCuq>o$, '^*:TSw[~K $}u|i~3ŧ٧ \I d47S ΐ<S;3/njo>%x[Tchn)hcPg2(t,Q"e'7_௃|Wа<#}F/\PK!Un̄Wi~mKº-/Coois$)$7H U15mmϊSdGT+skWR"-橠Zڂzn98;k85[tmk?̏ueFAd__k ܟgY-_}[M/N![?kwlj&n ݴ~iߵ>(^hψ<7COr\\]bIn*ߔ T6Wx CE\ n⦂@+`Gŏ|6O|gUַ=d_.8G3mEf8W?w 9D7-?42OEudEռL[ɚPR2vWn3>~ >?w>֋_iuq~d.HVeCr70 5x ?z6&|Q=Oi7:7Iֽ w~ȿ ll𿆼w%"M>K:)% (Z<_?A?"|Ks\~=F=*+. "'6mHǗ_vWU(?oܞmKyar%RpQQ?gf)~*+pQkRGya*}>? K ㏌ng mZt+o{e$ouer4/ %|ut@| >|;ۏT>o$BY$IT0N3ymʏ˿Bx {[͔|[G CV~/2^Vw>e.2W^Ey'gL\x Muű IcV/>S_׃EOkZQ4ᾖO b$QnK\B&6.jOW|*'s\KR^6f״:"_=ޭubxtE,L$R2XpA85)+$ᮑq@־؋UU[EQGV3 pXc#_߁>(~ 4׼:NIe:ybY`8Nε%szͭ|iپݼck]6kb wH# @\ w[!ֺmͮ:}~@[xORtszE>B1okgf&/&$sk6Vȍ~ |˧ᖎ/mgn$zž[dxPDdXr7Qp39{X¿ '|C T]\eD{rd, $OA|rO,?g|$>-m{rxFOړNNɑbyʫ1$Iq cOEDn/[m-:ej4J_3_Ÿx'Gðh~ .4k8jF#`6Tm$qY:?௉OPᖵYIo"!`r"3ՎHU+OGE7_^ivh[^[ Uce. 8z?p O_WCt} ]L8,`8U"O n dW+>Asl=#1nӹ"=$5|i| =c'StLd[,1FFF[s7 Sz[AK=6ڗ̊܀pxW f|3Ez uP%$ֳ*m4%n]]m_U/vs;|LZfr>PEX?Ex?<7'uMjTdE\p=&AGa;K?^QƺPť@ܨ :yּA/#|/"*?>M]Ot=9?j*o)ydfo>/9?%y\ܧVwK;hÿq ~Z_'9on]iԐ2ILO93M SZv<20$/I~-^msoGq ‰"& pAGO|'`~џ_|_|E\]Sv3XǫiH~Rr%F8"##No^%^ wGNԭќ1B"%bI$Xf2So|A-ZO՜)V('Ua㍑ꤊtKW_<:}2gh_8<_lOOPռ#SP:sw4^TnvRN2x??87%487xg+CAmq4ra@v26˒q_튟ʿNR4 ](b(Կoσm|DHN1Gq2g6VH+c;h| Ú#lH)_0|5o gTkK. p5H )'2x>ث3JҼyypD[9(iWq<@5R>^ZVhdeu.@QM{;| &Я~7ht6D|{gq:ykD>YeI_(_;OWZ}FmS kP>h%0$ЍNnlJtgVq^{q;sTd{?Db88&ش~jđEǵuGڏ j`mq]xs)?gzomMѪ:R-2$8)@YvoO?b=Zǿ xK_b|C ظG@YdOȡ,1V,qΪW~g__{U;\\0Hĉ4*X@"|OV$D&KyGa U8]ς vK:^si _TA$W_n?RcB 1t!Ӄx8qtKh_}o7JSpWM.o~|ƚ_k?]7U~ 3YP?ţ+ /!Q>h BMrkg#>|#g?Ե/ gWZ.nf:nReS#??lkP=7l [^4'} vw`,Cq[q쯔X>>x<>G >9R 2(.[MϯY:;PQi䜼lDxOdw ]t4eً>K9uXakyWbml$7BKI?!?ngo-{_46?Z鶲s<2$8~I(:`W_­cOxÿ x/{ԾxVm|758[¯OH )P}?7aٯ'8|ox_OYƯihhZkAqmJ.H 7c# S_&4>4Λ[Wy2h&PndoCnT!o~Fȸ<$h[fǾz'?)Gq74wV+_i6PȆU q6 .kZw3O^&xY{]ޢNU-{YFsnů-~0^%nWC-/AO xⷊ+]F٭$3U!e$ UU?/0H9 i~h6H gk$׆q|Tn=OoVo̓{Kk3+(Pk3×O?xWuxn_|jKu?3?'5/A|-~u?kijf`Ѱ0Hvc#7g~fO$o_ ֶ7zƢyWU(D3KZ:$Xko.ikoƚV!?xc?KyOjO-&g}z3=aGl#<_L/?.]o 3g&En4[U41$x,P3_G߄| lªKX* _9K׽0Rhb\g(?g-_s ?s?fR?G}'=&Y#]u@OS{g-x:G~ii~QX%37+3s#p3?b+7i[ֱgx8Ku[IBvXI8mkOUUU< ?'Mm%|1{_&4x/쥵1(iK!=>2xǟ,|F KŶ\x$|P+X$T dIcd+YB_~~0^Z Fl:ῂ>|3sM!mZE^N[I I$Hr[j[EzPOmKL'הn**+:0RbY~JQ` h=|۸{SֿT_/[W?u_lo&! a+kT\2Ih R~n?rWwy~;oŭᖼ'}RwdP2B$@jƶa|gM {(ןWu" PgO{o 躮wisK$(ZtA*F yfex|P#xfZ[iji k!_pCn$rzo2n7í!i@pkSk"ˋB2|[Ī=I/w gJSCfxtsnOHVe[m)e+M6v -4KG,<;]VMo᮱{wZF IeVՒi2l\r^ҿI(Ϋ$~E6}exP+]FP<ۋ[w7] )X~[m/ZOl-:cW:qlek~&QnAgHx+~;]{B ChȾE?RAIEWڸPt߶#6WQ0I*ɜs߭s}z/?֔`i q[%}XjC153^#CxÝv|`'佟YKWh| h!Ane?O1d.hԮ*tVsֹ?7Gψ>%- |euʒ3?;E+?hvګ趷mt{o4gM3 Կٶ?u<'=j''a<}jCcw`6%M?ǑФѩA`?f;* O I7+ jKu'1U/@| cjw?vTۤI`X{O?o+*Ik-͖ n⻶'|OX|2d|=׮x_7&4jqz=;oX߱/Bնrn"l)o?]k;Mݟíf ‘󯮿9|:+cn_ :lʧ5 ּE8})T;cb;D>/˞_njMy/ΧXx F\HÆ~|ɿtz25)w T^#IAWɿtz͟ l>3ƖgYѯ&uIBee d~+ %_!'6/?!&K5 ėznOC%rOE$KzTvz?GS4V+pe G? 
[̷Bx*gl]3 3k_o(E|QK_HmŁ]8'Ҽ A⯀߳_XZo4ߎz̚q;iyDĊ$+sGr}A^wS~oAfAX]RXl #N_f~dgrnU,ߊ~+wxM=Bu5mkۣ` 58;_\Ѽ]/ŗ:O!Ww7Qim9D aNkB]F7co?ட-Gg5Otnn,..##ST0r&ecõ -Wߡ~.].&_4ELO&h\/ C~TJ?|jLj7Y9חަ=լ)˞2MYW/OTnR|?мun-u])B lx?JP|ȥY7kGb l|n;??wyr?tw? +6gRCN{[!_\ _w.WW#}ZMB($#Ԁ;no_#7C|i_m ݢ+Y$ "֊N/R授Ԋ(=OR3+?["on-nMXxdBKk4Vvgw׵fcIɯ(?_Wx;&ѯuM( VB!xjY/u$nG 7?Aep>Hp  µk|C/[3'ekGGX/{tjڬWhiTدOqj"lׯ1#ZΟ3wBJ~LnCo0 q.w ^ Z%KHft2eW <]Tu[rέzJMY՜7J~ߵ_㗉g?_'~m&~$bn|霜rDd6Z8򇕗R3@OX\dܭnҴ.m\d.JyN|.o; tm _|eཎi^\1*F8hїc(jI ./W#:xYz}3w^&Х珈L5`?_?iMW OڿTZist{.-"U I39 n'8?Y tp|G2ɻggAjv:-\=q @aĚ|mv2W0ə?h+WO0YV->$6i\I$`\0ҿd.U;? ~ o%֯c}:F/ V{54fRȥcPJ9!g[s_?"^*_h> ӣ˭^-q p4w SzZ &)^eUlzG[F;|PpSRIo^_GT\t,;gl|u-7V4_Is&%њRRxj+*asW$zI6|RLx?Ɵ /]@!4|s!YX?П=- 'f+*|9>ϖ1gc:'SäQvz;ipGciRiZ?Z<1:\n8ֶI pNy'< (Q˫)/u,akYQ A1 ` tp}yr)mwSd>*XuXC꿍w?ؾԯ5w gw^'$/I#$^62դӶAb-W)5{5m~_R7 o?Ὧ=tW׼qomx.R7olZF%;Gig S q]~'7RZjK};yCv6)s,kI(|N@|Wb6&\s6-|ݗ>"]l@Լ/>.kAYo (7d+/>[C_b|+GSþ=+mS]?SrE7[cF·d%dapàʷovw1s+ _uKw`zҧBCrQ?(1?[oT~ ~m|VRR緆Bn.eBnd!Np>+3~ӟ`ڟKZljD6(kC 쥜-9q 3|1kxmrvqשͱ5'ndЬG pya,o]t>h <x_-gUt(l5d٭]nC^32b$ai*G "O˨ jpމs=ԩ 3w7-ܐFpNU1&CnV9'kזcVn#Ň 'oSOq¶Z?3_.bEntvK( =:_ #Y~П^gXt(-]m'kgK{f@Xd;rFEyok c$qjȻ㴌yRxnypBp8+k56cS16y3̿? 7[ \Ϛċctg}gy-$o;,!\ng;@_쏸 ⡱]^#sB=Ͻ+Kzy\ZQ|HZ?^*.mso万M[+(#1I>t  F{x~xcڄX(n-!dۆ2y^0j6^Z߮Y]fV,zZمGU}Vj9|(aU*{-N#l?''߃?t]odm-vԮ{3y71ǝUP ^;x-czڥjbCt!dc]4=ܜ]V/wy{>X hӤ NO_Sm7+sYqLl4ZG|,<:o;f^c?sj~Zz?[ӵ=ZW./gy$h29 㫏|*6;55hqo9#?J' i$m >5F'J {]-s+ ş~LסOƅP o%čmW9>ymڣ j2~Ok̚$s=٤B#O(Rb5f`t9x7VZuUY08VhsQ]G8{W,b5֚"`@ 5ǁߵo?o|+%|ioO4^VͶOɀ(W 1P}|a ]E4{/-|ҷ.KL Dô$hYz`kѣy4+K4P XERn=",<1Ok^YnjowπI<@e֬uyn${Vj 9''֟ZIq[E3P=Pk~t빣[\J^UZnԌj2Ϯh-q ~%5Ӎuk[]XEP`kg u/ ͖N'+d\%- R3+ Z̖ld"{= |O˨Mvw2F[,:qeWM|W~w`O*ڒ{Э;?'o;q AC_Ni+*3q_6"'ڇ\jeO;ߩ]T1MP}^rÿzGKi_ Oy (񟉣u Ј6(qOZ⡺ۅ1`p?C(AӮ2n VIڒ+eT`c'LxQw3 QË}@XzU{*}qSՔf۽O[-&)~Vg ys_B5mZl2+J挳 sߏʼsfw"o/εu`~#zw|krE vv, Nc_ 'UiI4y6J_ԻfS.r9/EN;__OA͌WG;9Sv8:u}:t_wCԩIB*)Kv^]VTm@k0 {G־oK/5o> 7rkWkuu XXYwׂ \YPa8:凈//]vۈ_]W죢Z1M\ 5tdYbhbR 5*^=ɬY4T:0@aX~IXjQ4*%vw,?JǂBu :d0ѩeU 4=Z=CZګD^a[s6^sL\sxC7qx8|.Bu Z4qG{ |+_GapLHFz>,qPOhp2`Kt_Zͷɸ72({W?񥂼;aBR5P~T)-$eel6?y:S$r,省: ajԧR+Gz{я6Z1ƬVcT Jo$i z`<q~Q7|Z? 
.38֔%ίhL k\szr.ej) 'Zo?[E[yw|H=#VFJ2: nw۷?^s7I$m\d޽;D_$kㅙ _[me'*;u`Q4j*0#H|T·-#uؤ VɏSo ǦjD z#]΁d3ۦy_pjx<3}:XϱE$on~a:/90Feڨ 0&Y|Cp0O/h0JO}oikwWtjvIuW9 aZLm۹oZ`]M|axG''i?_]2FEXH :~Vj2)X⪦"iS睊b'է{4q +ĺWpkrlVsE0KeQ@o^ o5.Qx}:W%)JU#4wbiF]pс^}Wk[I;ABn]UIq7|ּ%oi,vXD.$玵tJIKv3Z/ LD"Hb?5Klv+zcRzw.5k4G1?CҹʝRܧaSQz3Ȳ'+Ԏ9 i1N?{h=Ezxr;J"--ܤz3XC ~rc'$uO+\<1$" hX;WxOS_jeV7^S8c]{Nęfk9fv3Z߅"8Wi$2`z qNK[x&c*"䌯XH95oY_4fB}gں4N[(25ҥj3F9aҿT̰tqҫȼ:xd%c[_Dž|;/VVQgYY~r>nZ:smpA g>y1U@+M 'k^A#]R7xLufBNzs G/xB3מ4ֺGF \my>a&0bXLRJe\7^ӕM&B(ˑGzGoYz]cigm5սcynx~zv*9і&B_tM/::ffdv98(i$ۺLwGW+T0#Һ<~o#GtJ[9U$U.ɕ]/~Ϊj?z?[)-q`ױauKYun$tF6>sxSYm$RP縌(i8Wk^.&77S3Ƈ@3kuen&,,]G ƾ4}s~kg$q"~ 7`ltm2M\ibqD1X5sw^TUEx>969z.I>rͼp:J6-.#s|sӹn@Wf q^YTZJ7Feij73|XtSXzem`r?Ubʸ7\GWyfcz,}eMo<+u9 ,h3-fā%fo2N9U i./0nzs~?m\5 wFщҾS{M'ieWYv'ڲ \2rKr) uH;KꧏΫx)li#(Xm>/4Ir](/Ǟ_R( ı0ǎŎ2n.A:Mp/Nj*x#9I:]FM7Ú3^g/[m.90޾jXS:J);kh>%#fXO-ykkPKgr-m$.QsUo|ƗZcKnFMU9ba_{}OU7X$]<ܲn2q2 ~ G 1O# k Pg^k;]YH{o;R.%QQu2Zm65+Jy|?xM'@݌OM|Ֆ$-:RA4{SZӞҿFTgϘЕ~PׇJjSv?ĤtOzmjW_ih؏,gqG_n~&FV$H5ŭDQ^x?7W2G 0 y@2{p>1ŭ$'qN2ԊVI|R_v?|Cw7A:h-,ip}Vwlp|CçI+jS^$Oxb1vOesq*^0]Cj׋ 6Src}@k|6K  .9rGVmgL6){׍~ךfhF2' |=V.&f呏 _Qԯu Ue$9JH=85b_oSk?Y*9`TzoAˌW5 X|G?)o玵_U-E7a 5IWvchZğ>;גgHmkUݵnG<ֽ㵈[mc'lu֏zm༸]6h-]eר:ujRC 1 Ji[ֵy#r1;rk-j::2>$x~ߕvwDvjT䘜}s޽!RuZlnb3^[Wkn#>Z\A&k/fXS<Ԃů-&mnR9?t*vEE["( zں%ɫ [wjgaqmivK==6aItkm":IIsww˔Ώ."$rjRqa}XDS.ՈBW[ svczs\7Mwfy%N$*ƋΕ?dkMK^8W^m5w:т=n I(jRr==kX\Zi~z"÷k ¸Q][BV5OH$CF>cX/='>',ԥpۯ5Vh۞ }_r(~+Nk_/-4 ZkW}ْrxc5v#g{1W}ZNmrLCC b}:KK*p׭YSQEs?e5I :V]~혅zzUCD0vmck>XaR8>>Ԯ|34}Y-5&dK$1ٸ*AV8>>w.Mԍ)*3Cr[h+biYosV< J/dKm&7[g5__M^LnX]~OV=Og֌2嶺֯&iq9"B$(vi&>$s"oXrG.pEwiOS;ǯ~7M]+:)sTs^/-<9m7fOWʞ Ve.ESקz/ofʱWg*{/#QYOj̯sMj sp>_X-l:,cv*(5x8{n*m~+$z& W3=tkֶnR>x WgŒɴ5W'Ff'ڬe2E^ӱn܏S_?gDiԞe*u e_Yŭ0yp^@lm- ՚aǚ{C+!Z&S0 6!ݹw[媷C[KE|Ot]zZ¹̋vHjH]ȿ0pzcC2Qv)c{QFIY-.foÍ{pdhJWO}l#UQNkӼ>,1#Ut-_!9tzQ6<^ et\Q]寗|OS]vPF_-T,$۽EmeMYn$jp OJBjko3ƴWrm_-L +{j׬Lg\Ƙc ޾!gğuIn4+h.c4P,C`b3ڞ|K[Y}EAn_mz6?S얽nx8uw$&~chSHRgӡ>ݪ݁;J܂i'8_׿//eClϨKZ\G<2xfEˎCTg;ms+ v8w̩e%iUcZ9_FSe W끌Wcx"TG 9{EjWy>aeoIx'Y[t엘>&]5嬗V7h6F8y]S+omMA ;n'~bXN>ao1^}l=k歊of,RyL>^h;+zo&;Kq#G Xz@f̺^^n"XefIF2֯3*-5;pj;Or5վ|:|eI[ŃȻ|c J^3S. ue)W_?^%=omwWM8,}'ڽ eK<ʓ /ThU\ d}(:xp.ES6syR/M&o?h%uaVYh'Mznٯ7Z0#% b m47 <_c^\k6Q~f} OgbrRs}?e `֓c?JGOVJY.$ }fA95 u? Z.Egcig3 n@yy&E\rt*fiAZ<*Ƨ=Fg3xMxv=㊤X!wWn:/O| \Gi@S:]ޅQGҾ7VJ^ϬӂZX4}b|jPA-d6?*$k-a2T\X#ڊufv<\V"SvOBfnhiv fsV o/l>gҶ4*9޿wN#~8j+ēGak*/9HkYycjzF3Ȭ2ݤT/Y}5V- rO83.}¤cm%u8=qX>͐qZ:?CevD^lƼ*]{sf]gۚSi6n&j%sV\Ph+ "RYdp][vnuWWS `_c uWcvF2͵@G`_}̹$Ӟ&e, W9\]3y]GnU8ԩt 1aTl1}5*IG=H=7^î.c[{;⑃ȣʵ OG}-IB$Jh? 
;m]6迻w\qgg|PF9&MA1UxkS]7Z8 ۃ & O 5$43Wz.Sִfq1$0 j߁<+']oewnvQ׵f~Uc sq^0銶|H_=?μLv)~p[٤{q<:jلW-g^k06޲?1z|3ݪ?4BQnf<>dlO?6zVnx@LJj&L1doNx*lӚ0zzVΫhpm@ܸf 1+%ek).]&Wpʍ+~'V+ٳ*3G$hM!?<7!}eHnD2YfIf'=ˎ_XU׹Δ4BqV4 2j,$YUv_2\ȥ+4Rџ|7;c 3ZZ.oG3k$ro<5|?|[VgHfU\FC&\ۻ^^*'7CWߞm|e[X_8Y՝j3מEy3Lf-f_L9g{FX3/Hb.ƦlN*GLt C̑ECnۼv?FsZ@|0m|9?t|O'.>RL}q^A\ Ii +Wn[ FS]мBh|҅ۑy.[ggt񴱺ȑ3+c@eTfkgdD+w[ÌYu5dcϠMk__uƱ ]%icZ&i6GWi|3F:0Z.$Җi2.\u#Tumn32v}޼gֆZvduz}1YfSqk.8ْhfoIqߌW(ĵAV¿2,uzg}kԾ7!F KqysHJft3VH|IuJLjCLP(d#,pN+ 0c (ǩ~,|7O^ ƥB w$ij'ـIV&gMSHG1~(M®=+5+Qyi.m63Rk7q;2F3־B:fv:#~u|WhlMh'0F>x=c_SXyT B0pVûiu(/v d/>#Cq5*hնP|'"GvL/pq~5cen u-Dy{կoY4kkЎ1k5=b*wg%ׇ]ZZ]2ةwp?yT*{Wh֝Oq] Nfi6*Mߘg:Ж{t0V̤ tCjQ<.@7M ,[8NzTOH̒pźƤ-<{8}X?MEx:}DLV[o.X=ۜc%ɼ!Ad(ɬ}MHoYf6,N:k7nuA=Q>@8#V*n'v=5|P]yV<'\G| ­&ݣ'U'djsGS*8 ׊œ݆X,>b͌Wsc"R{M .Yzo 2‡;5(ky|p uTdܨ ys̋V)U={^F M5ڴo v8Sޥ9㪜ƔM\jZt۴3I4E#j/T[ff+?xnto]YdmL˵z[T_ok|C*31]_U=[qY_ukO7*#LkƳF!;r*_=RJ`I-t{^40oⒾ\Mn\lGvX0)m5`0XtdK`.> tz6mdm)=sz.'W_XBwdYPyZ^Fb_s =EVd n^RNaO]]nz `^߭f`}1NA#(`oW'.e8|wӵ{StT 8W|WLLLO濭yN,Ey>h.:W'W O[~'f4\kKn5)#;2Pa-qpٓnN*4:fxgВ>^I2428ܽsSnelxQސ޽>sCHSr>\ -~$̢扦;vcc9<*cvvv=kXx]W{ISl{Q*w3*5g}J]? x fX|Ϸ}x)UfM >A=Fk[Ve b@oz+fڎK},5RVa {b?~Zo3鲜lxͣMBVIFӚ󯈚31zUiڴKz.9מ+>+3xFK ;o%UIVQyc=.4KYIct^?oׯ.fY$d**j+` 1d#U0kӴY`W5|=~K2RcE l1ϥsL(nzu(RW<^v=vjV2X?6D?=v jyvuy/!z1ż +[\h՟~hAC`}}Wƫ}OSݨ[Gk}pۮЖ3\A5;b~_Dž-kǐkjWt"EnG nʣAT⫏]H&ʼnTttzW==Hc?quYEꏥ6kѤV 0N yomw\X]\pOʪNG+SKmxSA]IҬ},p9I@w5cj6c .E?(s+#u>;cI;">X~*r*,|Y^ ğyxL-%̊G\]q9%c;pB+`5[3Soh^p*ӆb.x_F6t`񮑬Ny\U3E_N@Fޞ+NGZ圜+Ƽ޷9=:ֵ,3uqXMc,~czUcT֑Ļ*?6pkYYFȺiXҰfn1ꏋzbIlWzӌ"RZPkoP~V}q%Ƨtٹm@tik;Y/:ڣ33TAOZu^;YnuO\Λ{63` ;bzcsw5hz\> Yr'`}x)j6*ө/hn6c9:3^qßk&G%: >]sƺO%oí=ZDm9Eߌ  v J^p27*CҾî_{X`ȮK.m9kv݉?-Icm >+{j;YUL^pxjz;;s<雾Ǩ.'#sYi(V\LiC]BI- V_p ?g*s̥NIjA~F?CϭpzkbTQ]7Y'dU%zbgkM ̅c0s Yc,߻HV=!oʆZDG_CؾCЖypɷ?z?@b/5 ;rFs:T%(]yL4bd<ye|u$*ŚlrD_I W|L[ddXQsW_kS 9xPS-GqԌoM +cc#>w(,#pM|v2OJ{Rn8fb#S5Y/͌`gG۷FdR$ ֐s]=cRCdcLH?BF~l)K4CpgPQ*liqJY׀)SSNFTeڽj;χ-Jӈi)d2$/ î8GKZ!5辵b)(R㊜┺+s@ەxi+S ?P^F/8rƹ=qfGyk>[t5.d?{>kEMb6rzEUPS\Ii0h١_@,mvZeQIs$ϩ@$iHV^GEB&޼Qu[uu3A,6~_yذGQEXO IV{c,U wq^g ̫iefʠudޱݵyq׽PPS>y Ps<6m =>,B^Ģ7\.;$k-afبY棆FVK&KHdLO*OZt)\AY~Z.nȳE$Inʧs^!ƹ~@k@qZ~+6Zٙ*FIs\6l'>bgȽT\,ZCB$% mp7rju÷s/2ͫI%qXb+9KZ;uȶ1yKs<1cmF]2*7 rkӼ==_DsMnF\7"jƍ:=CnnӷDI/4vvY ןg8ˠi fIFd^lcC޲_Oca'$6U6~fG afFUG_AE~X~M, Cfn5*owfz[ڔm0ס;:qe%H=*}6j2ek#M<ȺN˂!$m8ksia]6Vm$ u @?Z(mFky"Y"#~5/šYݹv|g<9x$"Is^A#P[Ii̓/Epg=ŸlsZծ|8(#0:qiJIEI-N1,fmc%i6+뎞ՍjOu2z~5[,HCpТ:c;ċn-[ŊEcgkF|,sn>#K});?*' $et[z~2<UkCշ,}[:ONh慤 Bt,!4ܜ-zpG+|6KګR c]I`]ںX2-Q hFʪ9\56joxQ\?qĻ_n^"<2=Myw]9淡FލkeI5ҩf+7>VǵVӖKwip%qNҦ|? 
o6,ӃV#𱷒ݼ%?r@'%UK5|hkɚEۂ~xgѱ,ߝsn3烊F(D泭QI3}ˆ< ԯ,w Oqg QjHbi]msԟA__Bfef}>:' 7:~ +9F"'YKq|75J(YҮEe3[iW44[{}+jEr͸svvm0"+kYr'ڹGK5Zf+;CW?y; m[K>Yϧ`r*(qU@mjPu89#B z^sG$J0~W@N?u[VcX|!zzGn 67¯C<²[4o s6x&FUO_tw=]YPFnO|+iGZVR١l!OhH;Yx\_b3&Tks d9FfׯNfD@UI {qY8F~'4TԝG5ŮK¶ˑdWӺφdTFҸ-^c/iy#]iyWV<7ՉQj+^Eo0'SYZU-8;wujKB,-$ =^DZ9nSNmk8.VoߵoКk6OFW)K1u򣌷 Cp?TeNkl2?IrNŌh:m2,Q:g€3JB|S>]#2n`[yȠ+ZT"SO+|:Ti͆9nbgG?kx]1Z"[u[vL8xjuZmZHUɷ=oG#_AZ&VFgUF 6{T<&OǬEmdy+0J9)ymy*`1zleau5wfЂlv/9d-+ԯ>˺OF>-̇+IϿD={SZZƕb<3p~8]*N\p8<J Ir-FfQ-Vu$m,F𐺛tMxJCOkhc u[\I\:]4̊6e dS >!VV䷭yq[:ngxoO# _z5`*@k{L#.HҲ=:den֪2kDiv mګ翵gI|wVoNG=OlM,v5»a.U;6khɢf,s^| |!ijP]-%B} 5޷+׷,6gۚ.^3Kj Y@ ⻨duqvS9'sWŞ 7cwE2N8Ҫ)+rbg%Oh>|FRM-#Ra vϭu^˰c2jv $ݳ|fe OWPU Dy &#/$aesץu~>7m܌&)e*ѷI?x%G6xhث.} í>U0nx~ r3:eT\?<2ʹ0+)Tlx4\`rw~|׬M͹l^5l[qǷ}>41ݛS0p}4)V}򎞵h10ڹQtrq'f"9kC|kI"?C^bE|:WxJdF2$^U†v{]fB|[Td [%cfHS UXX8**_3zzȐB>liuϯ4֎9Om)*HnΛcX=3Xԓ܀xi$C;KOVYV3+xLh~ RsCǹT5֬KJ-ZlªBzz1ɸV\uXʣ'#q nGQ310  ߚEVXZK5Fu[/mW%);汴0~fȝZkSOSѣQ(cj&Qkې8Ҭ5ڤY2QҰN?8T1dos^|;ugvm%ݻ;}ܧC-ev w7ܻUR[j22y`Z9K&J3u~mVW̬?3ϽK_jtǕ'qṿ9rU]#ҫ~3qV]~&湶3 FAkͮoX]3˖I8+޴i.^u-_,Ȟ-givSLm9R'5_Wsq"u>a+*#y-}WRgXkF:73 A|Edo4=:ei6j"#k:WsdU "ǡ_ps^c涗 Ǧ_FM4BH=Mr0,Ӧy' zajZ-yGB+-/]3zQeji]?=];X&$Pjoٲģv$2&K`9瞝 n>%It.dmP2\O _eTZn&L_/fY>u\mq}٢ RFN>Nx&ZE#̀϶:+ۧRQ~gRrGQauڬEY]꛻_؋RiKִjgP=֛ؗľvִ-B%Ȃ9[#&xU#q 5bI-䅤2qWkO5_+ߝDM|w*~(v"g־<jyk V6yy<%ںF(Bu-=f{oV9Ukh&+,~,tRxMƂ2 i9J:Rp$=b|Tk>$jP;KHOWAP~cSʸOfV 7F5Tth쌽W/0#Vfvv1<eU֮A'$ǭhPZ[8l'?zO>ӣIHTWc>bbYHpFj-WE_Z`*8+Եf7V$󞕩pV^qkELw;inYO?Z4I3VV NNvAjv/ Ѿ^Fk'pxՕ½a`˷%>ux $gH|욊zM'[zᇈbvxdU<)n"@>.;c[hKGEe=W卺}+ .;BRǧ;nQOͷHaʤ>AgUa˅+&zi>*>\'ƕt-LnTNG5?x0Vd\ ?ҿ&G(${Ggswn8m+[Hʩ>$di%dsN:s־iKU p6ՁԯQ(6k|JzDN@=5ZA=lo.5BI&':6'ھƎP^{%xG&Ѳ7ǥuwUU[">Y}Ma 5]ͻNգͬKC82~$xI&h#%9?XsESk\[lr2͗7zfrȊU98o+RIʕXьoicv JԬeb[tĤ]$ln23+=l~_JIS,-o3kZO0HV өo!^ /]Lь7P\{]g-.h'\s:2jT쥙I9ȯ؏1^Lxy#v*g>'mNSM&L}vgǐ~`]Im9YdL_'y;(|/7Mo|M.`,+ ] Cmp:W f?~2^~/q{Ҡ71y7`HgBc޳韙6K/ AWb ۆvL ûυaPSLs3 43)2WoqջMQ\Ɲ{Um>ojvZdq`9qOF!VO&?f;eRd\`1vnUi_K_N߀o G/ .C]hK,,˨"JsT@8EK,/#xO:ýža۝YJH=8ϚWO=jޝvJn&U_oxͼ'=Ӧͽ6Eo${D?c?'o?ho |z%ZxvMm4{ oiuumv'b!2Q?Tߏ=?&FGQD,j+CiBtJr-̴>9#?$TSip7g=>t<7fx(5i^ *0nc㒌@SҽSi爾"kښiKP<f[J+L?4O4;YMlQ,[)|.? u{eMs—+Lq|HT&*]n*~}C87'FIw'6jiO!ƷhBărGֻo #˜tZ]kͽvZ@s3޾yt`מ6]cOG-Oe_h?h~~B߰|HƋ_־U<)xit8̜ Zu2>n@Qم:^S]oG5>*Km.}25LcDsH뻧_MsO^BMĚj^B6gwpEfQq8#( ]MO]R=PvF1 :nY:gRUgZ$ڵy^|2 3!_|;y ~@Üw_L~?D/Poڋ NuKis2SC< y@:7k_g  p _$v:E%Tp.oIGUrNjp[_r2iNAGW?8b?[zWѾ ~|]O#mk|#xK𝯙,֥ +#K>yr1 EςX}+?E?Vw$;wF:8S\yȬAj*ocIχ z/x2HԼ;K s4Wm`}FY?g|Nj߳'c&⫯/=n >YK9[dI>&\~|+ŝ[}]7Ihj 5^D+c O'whgrҶ~GZg6j/?K\;^c⫭.+{WVUsr8'##e~?o?b/Ż_ڏŞ#KŦ\R8Yls`~P\uw;_)(IO_eݮGZXd`U]DsX&u_i֎omc|[E]?f/!k/&fCx[uytD?+hQu 2;Fq\fG[鴹wDg5 8Ok[]τi+UYYLC^k`̷SK,owğ5//k:]x~;g8TϹBӮ6`Q:SZu:/A.xwUӴEk;nq+XP7bR\Hwarӣnx 9F/Z'vj.޺#ƞ~gk[jFNǦ r+K K'ny=UYk;]@%ZH ~v d So)[Ce~mm4xDwJ(+*=ORK*r|,'8^Vw Agc[[xڷFVA<7`Sޱ9[F,cH]OH/'^3 ]~|%)\[Lɪ BiN6?On}'}v?ZĻ?GYElfo5]fs y|:ʒWluasjSWOM:M|m!FiH}%ѧ`y~MtX浑Lrl8OE_?i?&O+G+7d4&eAМ|5WL?x;"}r DxLt\ˋ[_x?@Pehۆ'X#Q'i}|Fe_"_t_(xno5A6myw KqɯMO$:[4xz` Շ_W|rG?)o/IjMY֒H E$r0(I/UbgΖ֭aR]I澍o8_#([&UK_)7CͩGƷa`5qn0~^5%? EuNukI dm'?c_&ZO} CJc-m.n$E@")NGU81o5f:zld*-"[[}cI4_s_skៃ~rLoMYuV##}y_e?߳O%~Bޟ/~ݾ,~ Wh=դWx>PG~_ i?LAƔjFsb~? 
Zc>.Y^xŖL"i F8fcKtsmDҼ > g|cxNkH523pCHBQX.,?g q ?/Z rx:EΓk~KpIt\M 0 cҿU>x{g}?4 ưAw9i$ 0Bț.FIcUhQj>Is)żSsWhC\Gq c~Y.vQ%H?+2M4n'⼌n UЯ/o>-N9V.|V@_]_^?f Ml~8,N𯌵ܮr\8=n k?ß~Vw@ء' &vɯЯ&O/|WM>%j^;WOTm>;a'+ּ#|o aouŨۛ,{$R\ CuMjJvլXYU%psn|V?f}Ou #LZ6ךd-ms=夭nkfx' "x o4sxv{h.dVi|H^p?2R?~ i |Gzص5ŝה}y& hJeq%@x'O Rp&ݹٗӅLM8Tٴ߱O3o?I~,񎽪x֋$pW.P'>,~џ^i^<wC׎4][SIkK !$cDwR bXB` ?c<)[>8 v[UR]_E)B}#uE~Lm/a 6,{v-0G&w?'cD_?"/ot:V1?|Jf^[TA}62)m) \@cL/c$Gt~[A↿I#6LMCF u& ǨxE(u 6UXĈc,^~k,|57< kܼ63U6EReN$iDkq[}DFQ5Cu3KSwV'9ww+ҍ0#kklVvCLx-qnKs޿nl??|#˲KbN? nTLM*m|AM(МvpIz$Gi'أA?iF:$YĎ?k#(j/789u wuS,|3la}2=?HgTI8~C^y >Kxkw-_R|"o_^ۮfޕ}?vaq$f?;W~dW/?X( !mq1_j.=ջi?}_HG@v/=֗Zg'ĭ-ԭo?Zli$];V, 4~;%42,#[tum#؊QXogc]yO(mC8!ׯWO/wH>L}kRU֑I($|`I)t cj +VVfym>,c(=NHy{~ʾ9QvYxIΌ_%g*wWk洹8ѫ(UI2~[|ou/ ꗰΧqM ?]_lω:ď^2}~ך k|iX ޤjt(?K3~2Rٺa}}JT{?=O_>ς+?ۃSV ɼia^EW-7<7ݯ sT.5/&Gtg@y $bMwp7wt߇6 g_^ҺW `l{yٻR>O?ksi1`ՐhŭƗ CV[^F|K^go hOҿr༟oQl๟-⾾-%ǤYo*;ܹxWC؟sְ&U:qRN+/#3E~}-[bo~|ſtN_^6$\l\n_x:T:>3X%Ga295 ൾ`<障$`ta.ϣncBWNW_|Y\kUo ܏-j~t }KQ|7uc!?nF&utWZmBVo9o|aԴoٓÛ`5]֯2JUi֣ˀ0x'ٟo%wj?r?Zj }_uC~=|-x?3OLnhHmHwK1'ԓ^kO:?ܵ xoJzLLS*؇_w? nn}9Oxe_k=v?c+J3~O:pϛ Xc->o_O_aO'w̟j[U2n݇9}V}OeBhv}=G OfD?O,dWuh~I#]s+',?C|Y?j?Y|v&{I }I5BRљ?.oMx|H{xWX$T e J,Om;2x5|]O>:?i~.^_h2X@C+!OAڴڦ} 5UUKE׷c[W*;BˈeU6p{^8H}yUFnJ.Ď>SWc)bU7siXFexFQG7-F`5vrͶw?/xM. it 'fu_-c}|!3C&c}=3(c=k໥ņL<=k0!-j U]ncYbMh#$q9mKKuycjIO@8S&gX@S2%doINcvoꗎ^[;ۉ62` +|?Ic|sDŽUhluK$JƲ.H_ͲZ2H>R銒- ,7lncH:(kY%o~ڟqw(g~ ^DLJ|=l#!mWmH??&rx7kܶϯϨ?u_{aʉz/L,m9@2VsdρM g &σ4?S񤳸WFU8&oݫY +&QǠ: Բ0tܝWU:>oNXzt]c~_X(]gf=|`mĶo‹LHeF8s~&Ba?-쉧w?}'SO4/xͅլOI~hݠg]H Z=mg,JQyWAK/M#l۫ǹA-JʦO83 ySM?eLx"VBxiGtJ1 ~J<0KiϞ2ex7z:dtR7'yLP[6o#;x0x -ݸ!T2nӸ528TreO:}4??ߏ ~)h~4o%]zeGoa%n^+W~c/%w1_M4grVȠAOM%el+,)x~[نcFĊlpdži?d__SO/1t=K\|<:w2E6߫I ]_._/ =>(4xgMH,|-XHXtIcsUc1>t 'ZOm/4/PB=ŴrRڻZ~llt}>$#VV@6;Wg'cv'2_ǃlk_G3xP2x:(|} Ӓn%95q@K~Vx+MK:;7E[}D8Dog?v|#ˤ[X*_˵hq+4Pt(Sqӭ{N_WV5I.~EO-3- _<;xoGSnm̶rcv<ֿ(g/ otK|*_ŏkp۫+%ounh ,>Vq_ uBnf=LD~M.5XbvjUº*q9U*|-=5jKE;(?;+?kŚ4;YUu ]J9U!evE:mr9? Z/߲3.~"%G)GӬm_[xf91EHےI- ֧i.,bwcI6BcV*ı3kҩŞrǂj5Rl'o|g$sudכm1X0&B3XL.W s*^-i[^,R#a#*8~kxwM Gg!9S띵#6H6LŚFrNܒk8[ɿK/U|/I[ KAJ<3lPkO Ÿ ,|Vj}J-Y|.~Zdm`v1^YxXu 'TKkh\|<ªdp;I/3̲_ӥSV.G_U+ÿXWS\TZ+gj`9lqGVi|~ExEt}"[}n8 FWFܳ \Axqri맽+U Bh|}}i}mpWR/.4DsL9DZ~UrI|/_-3[Ar-muĎ'0` ||+ >_iv8Cufl`pe_&M 1#X )K__֍]?{|㯊 iܭZԭQeXeYI" ; 1en ~|d6 }>cO,_elq2/ʠ WV=LfUK(I=+~~Mڇ4}/˄cfp3p:q^$O_&KF6Wgj5;?4[,pI9 $@i6o8qM`z*@ v Lh!(>T:RY+q'8_a+͸=z?2'Rm' #?NQ({_;;?Z>0d4kx|p0iS6"]oZʏvn91? &-?ĿK Xç $XG;cUp _ SB2UX?!4{.ͳQﲉj:*(:WW? x"o^O?R;i' u5<4W\m};5䳅M ^G?u _>ӵ*3$:FD_hJ^='IVK8_7c3v cG}? Y5l'u/W.\ o6 :v hNί2I*ˁ{ MB%+VmO#n~X\Zp:ۦ cS8Pk?hkα6閉2# n;뙯ֹm/~w#}϶ >:60Ά9yyn8ݯbm gsM4xOwx*324j[r$3E$.$( aX`?&OԖ8a=c.~ :Οu0O6ʁ58Ͷ>f9>Ϫmniҗ=Ɵ7TOj;[Vo- 2)|!G𖗥,Nm7M5 Y! 7y渱YĠ'T*FJkK-?]ܿ/~;II|'=3H~>Ϩ3N8E!dݿXۓWώ~w|!&ھ▶x,C+{24 o,~gLٍҹ1&pKyRFqxW4wOvX;x'j>.ᖵRI_=G?YݼMr0\1^֫;(Eg+eXv8'e[,wzԭ<5U-"u_s^W8h>񷌴:c/n$)`y~3٫Jy3` cmigZ]iBtl;Y,k_50⹜^FU1?/O$mXq]Y+m?- ?79Uӑ^s_4i}P/FP11z}hIv\$hڼ~C.vT_dx_EhQ{o]B,=FXW .ڇyq^Dw99SCjqgd߸® #:~YM\~kk^V.o0N=:LXIm ۗHVNtڹX"KU[9ȫV<ŷ|:Qg |qɍح KUr +/}Nx>J{401Ul/sVcR1J NFv.|u7ͻRqs^ ?6[ {63E|WB?uw¿m\\;sֿ. 
8#(6Ox15KZowhO>6H8W?~-l} Ki6">SwďO~'|)Ү#-5D{i/r'n7m}GrsTz__ϐޥJtO~vF/y3?󏎿n/ƯxV㧁I7kq^1Z]fa M=d"/ ' d^(Ҧids$KETɯأ [|%OĿo⇋4iM=}'nYpAN?IUkI6]Ϻ]pg6Ҿ|%I}lximoTNWQBO5 ^อJòIz%(˃(o_Xg|ۏKߎ>NA4Qu]7*yMwprsyt};ib%E`-~ÿV~@ޛh4G/w4ar_PI`ks#__>$|@_͡xn<.wVaUt6pZO)W7ƟLO:_O|B_uxbÚgfҤԧfbXYHbM#_쯧_?d?!5=mi'o.nc]Ccg, d~~ǺV]]v$k `cQKR׫̒Ϟ~~οm:o?xs#_ lUo"@+?)9ϋLwWMYq/:@Y#dO;$Ow_DV/oƒMb O~4Z3/z-5QgceKUdg7Ѹp98 .wgf%ՒIv~5Ua s3ŏ𵽬^k' !m6A85MTL55>&!/eVH/k x+TF{ GS{"+fA3n[b/wF?r8ojά~5Ioa?m?>ɫ c kg"MFpkogKw B_?VtI6yQ8}ko G|~MLI /]w-!yʂ2fe]pH7Wi}[ AHڇ&6^im&e|<\g˲jn2ݭl֚獵fx†*k:Etmt?8 k[?<;<}{axUKsڣc?,8 _جi 4{p WGD\zapVo^ikZ#zh7.-9{}ohpƥXh;cb +}v(x+~w^GٿPI漻 8M~4EU/.~z^o>/1(Q@ +Y.U=+}_̸8٪??~#?iߵ?τV4[[RO[:b#c'/' '~(/s'mW%֚u{=FHnmOr +fFo}~ȟ>??TDKhLN:upl6:p} ?~&|`U Zoy2u[4bD(BHs?df=Hd*8SZ);wћa3ltUVmtTx |?_~3h/QK=kmf|<W mf8l>4|*֬t\,9<ҿV/A}_?YaqO |14[cb"ERJ$uP[D,K ,?q8ǷuZ_zo6+O;?oo(¿٦'ﵯ6[x|,ʚjYq#@oÇ͌?_Px?ōkdZ4 _xj=7M Vg.n<`?h/8[y}u `W6Riwh40u@r0Iяq3 6KHWz͎x0}_*~֟g?'?/5/q ľ?%JKpkiP`-G#_|[c{q(n/:hzSK} roS[.x Ϸ'~cWLǙd)aMEi[T>GMJm&ۮٷSe3.s_9Qp&7 W:V:-ԓۡ;^qIF, ȯq??4be^i0 VA:SE!F0{IXV 2Mܓ2kt6DvwrAfIrr04dھN*I UVO;ڽ7ᕜ*UèSdfJ=d>i|Ud_!V1Ͽ5Xl}[cu6k#;P8W}5^(, W d}A~5ƿnulmbvQ?n皧O֗u$1(1bՖ>zRG/Cˏitz̶'v=F}3^}|,anI ,Fi wnc913ϧɍpym5?cCE˚-sA^ֺ@:uO׋bKՖ6f߁+ּG?Na|߸&A%t 3Fz#ZUҊ9q-kΫu3j¦ 7 J?%_xuΩs"<}#~t]1E:&qBjtپ`$ԝ;Sҹb`#Oa *poċYKOVrnk{u1Cm d9^Z|q &N,Ӂ]MI Pgn+?'i?wߑVK;~X]zZS|+EyZ-յ/xP CnKɷV*7."E{.87UxIs}+< ')0rڸF{ܿ>5='v?Y=ȟSku%6)z6}o/x9]6?Db,q2\ x}Ei߅?5(tTWVplv'q_expu;kˍ7ɸ)W ߈57፟uGCrII |*X|[Sk;r<0 C ^P\ɵY=O>*8Dfx{ź$x.|VA3cAH{,ՎK18& (FU֔{V< 9*]s_J XG?~$[2b9C"TJǭ~߳$_ IѦJk^ٹ`$YN+߿~nsOPB0^i)m"Y& waP~ZV=|enY+CxQ ;^#crnI_&}qωo_μ[ ew<>wĒh+jKxMα]n1zd՟8O g'|J+f%U~$~ѿP{o5okIvuk7l(na*HwM6FwG g7|?lϺA|u᷇lu >z]ՅZeI <ᣑd`H E|v_<^a _Ėzh%rT2SX:WZ^}ջ-|bվ3 ϮIS6pXogĒLŜ?3j7R|Gʙ}{Ur тE8/s+7?1?|jGY/qk0Ӵe}Z,HaFApAe}:'_.Aִ_o)$ʄ2J9vS_ShSk:0R8Cw${Zɬ?,msa>jI%M|5 7'P16yMyv3&ݹWOg -cǟ<}"/G_mtIoj4_) l#`_J~?P#Ⱦ,:|5%Ҥ`Kb4 r7`qN|)W;Qy_M&C_ʭ_WoG"|H:z]c]_y4Wn7llzmwuM{{o-3e姇WaZۋ3I2,aAr .~5|:ּ#u<7;94OOo-Rp$Wy 7Po$`ӭ)XٴWH#س9Z?۟Ѽ3_Z\tcZuP=&\+& 0A+5'_O4a)%$R9RG5eOVptxJ9 [P׵j_>+y-QbS,tti_+ȼD|.}i>񅷈tgfFk PNC zʸEɌ|k6W-|sWĿҧNIzYw'K_> )$xזNMJs?zo^۳$NvM G?CVoG| XŬhžcY|]rJhU~?~~.9OvKPlEW-;ʐMNu X E o-# ۾W¯~=~F2s֍cw_!cQL g9yd \{Ի|U76[˟ZL-4ۋ['`\rbCSQoVqFR)ZS#~O_qkvQf:u~x d_>?jş w/ÿ  ޠ_k]""348%6(󳊊9f^>ΦoGl~I&uHe}cῆm/{Wů q\G *ʩ"J0V9nHko|TSO xwlt֗gTѦ]B٧ +I"F]? uf◉h5h|]]oV-t +լ."p! oO~ǏCPAti)}^I13_hr/3?UVe_/{]k~~ßa/:^]>C \xO-e^9:G 5d *9;nooVk 2ygESୟ~=6q x?^ro~(>h5{Iky.0K̐ڣw-?3+WǚLa;f.F71g`RUiJI&:YRxJ?m?xx/WK\|?>½:/?6|qmyu`ךvL1NOu]rܑG*~ο )/'hkQM4RNCDѲrdk.`k1?~zMGf~f{<ڵJpٍg!H XɜmhdoG_7^kLky^nٻn7llzĭ~÷{\ C=Ȋ_-UF8,O_s{Ikk [OcwseV}L,aAr6 g~ufAҾ*=pՓMI[]6o#e$gtf|1WXY,6Uv8[n?cf̡9^Ue!Hd8ek"i\drǵRiI0V@puc]VEhٛuS)a_-*FLwSVͻQÜz*:g6 *u[ۮ+ylIyCVq_Z+pܫ]xB1^}I:yRg`T=oIm[T˒:͏_RkK[xd3e /轱^`ʽk,TSJЬ-?gocMFo^x]Mo$&ݗĖ!7/nJv=ľbr_ӊsI:UW=>꿻}ppsR05m=Z˽Λs=i,mmcRAy#lQ #\2>N)Hi>.IUs`f'ʃb9,qctnH`+quӖ>%PMh]Zծu 8n4ңlǑQ;1kGdI/6@+2ڱ\c#&:h5 S1K3? 5UcF[Q]Ù1xRƺci[}ou%VG3^[Q]sP\\7ݒVR3ۤwaS.pݙY4Ջi]axN(sҊ*I]7;3OkBjRj ~òvQg ct:B"3|>ljt xI)gCD6y%-C͖'qf2ޛj?`hSX$VMN~߳dsxO]K3&uKE, JIK֊ ͤ:U,R psQEv`qU"\ݜnޯ]Y΅5՚6W~6f#]J&`Ū~J.rke? |K߇ K] -f2 qEETSW}ާ㘭Tgًᯌ|CyjW]hI4bcy_ 9oDl,4&afڊWqpLKSTQElD~?n| ^H Ӗz9_ُ2DO~ewUb 8>Q_ETRvgӓdul;~>%?gy C |eéIj.$xm7DEXJtyw菤ӃѤve?YE9gTc y g^;/߆ZngPSO.(u{E?8(}? 
m>xuƱrJ|3 }OGGl\Fڕ.\y}袺+{Jz3*[Ls3ʰʧ/6?f$M%ׇg 9sZ(Zp%xz W&)6gx_i/Oh|;ஓ$a5['|sJkvw~mvXiib#C$gU9~y-BLgSV٤N ÿ 5O=(j޴]3hkAF%u[_PKլl >Hty֖[}ks\G{&YfXg)qڊ+УZt0m'{ePNS&m&ףg^I_|?XM㴎9x{+ꟁWv2^|#qq#]ƬxzՙaINQ_;V6V|JpsH E%궿_g2}/ Z$9ȌW\~ÿt_ŸpMjcnBNExn!(9oaS}<%e:,%``#6mM֝3mZiC@np3E\EW^) Ow>ot->[,IS Fkφ6Q>N (RSM8&fמX mEJUxΤWfjWZ$9+6E&pEZ)7o#bayestestR/man/figures/unnamed-chunk-8-1.png0000644000175000017500000017607614133140641020534 0ustar nileshnileshPNG  IHDR `gPLTE:f:::f:ff!!!!!!!"c"c"d"e"""""""""""$c$c$$$$$$$$333::f:::::::f:ff:f:f:::LPLQLQLRLLMMMMMnMMMnnMnMnMMPMPMQMRMMMNPNQNRNNff:f:f::ff:fffffffffffnMMnnMnnnnnnnnMMnMnnnMnȎ:ff:fffېېnMnȫff:fff۶۶ȎMȎnȫnȫȫې:ېf۶f۶۶۶n䫎Ȏȫccd d e c c d e!e!! c d!d!e!PQRQRde c cPQfȎې۶QRdeėl pHYs.#.#x?v IDATx݋mY$EX,)Qb%ȅT@ 0*;(,aU & "H#"ps2G:̈&f{˻zﵾb3O=*{_ݗ$IjH$Is$I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ $I@lI$ too ?G~~h=~#m$I },O_Sط~ځs I|6x9|a|mx*'}ǐ$Iwqn1^mϽo*[~ I|8ϩ&┯}Q[~ I|g>;'xjོW5~4ȓY_G 엍x{u`Kss9{;aOǯ:v9%Ip5k^y ?[$ wa{5Byt`K$M3< ,D{#lSؒ$I#?-?xgٷ->|'~Q/>K@^E/%'veyo$I ?s_:K|ݏzvr./B{KcktG!}ݯkm^x"1*ID^{c8}}_4xNl3DW+<_'v1{+l)3u0m$I\~|g2zAd/F\i/쿮Kr3Gzo6 L obT}C; 鈁::1~[%I(6w!`Wˏ#Dn>fwv_ѓ_r{;C{`J}o:Cm$I\~ng7kw3?>7_[}8o$IEԙ}97<ɸV v^vpϽW?(':oң%WMɇlG?H_H;YxG;|oxÿ$I3tWl=w|Ϗ-fwwfw,_}[pၽ^wq`Ȇ}&دgoCbxÿ$IKg[r[~JYV߀[b_kポw}9?4=\|3ҝu m$I\z/OmgOur.#D6O;gP>=swi`OuYV?dO ;v''I b0*ID}={luev!.o{0G?pSLL?>}`?l'=Bk`woλyF|[%I(8{~Ϯ~ry6pC-O__9{? d~ 뎆,=Bk`w&-qo$Ivwe=ϭ_{>>;d9; سĶG=8/u7kyy=Ǽ_]|t:|g̷U$rA{akt֣BG}:úϡq=zn5w`t~=#VI$ʅ {gW-Do/J$Q.m`<9'غmG,Ͽgx1;>v88{bI$ WYT<<.{/Ľ|}tn~ϗWO/;{S_*}ʁ5o}VI$ ռZΤm깤o{f9ٞ=2=xpqސ}xJ$Q.m`ЙWwZz;CjaoCHxӀ|+T& ސ؟`}1*ID~RtFS&og~|gw~|a>eٷwv/y!Go1[%I(4l3b2=sH5+%ݵyBp=>{z/ya7Nh F/~ i%;;m$I>l3W{=͠<O1~έ>r`6֟'?|`-9o$Iγmyo|G^l~Pο 'b`Tm>)Ϳ>3{B#^o1~[%I(8;ݛ_'^[=2`/ߺ}d󁿼Y[{O2yoՑgÇ~{C#]c J$Q.n`?'w.].v?q[='=׏H>)qH;~[AVI$% <G/{Wfl]ۖG߻_䑟S /'~m/+ȁG 컾1*ID9ہ _p$ID* L}$I2TY(I$ Lf$I~5%I$Ҝ OIo7ͳ`n[5S޸˷;ݷ|wo|?[?ʫ_^?ĵOnN$IJ[iV{{K|rLqkA w;[;[%I?; 8?oc;>zKا,!O8%I҅Iط/Csz}umGYLjy`wh`>ڤz`o?F!f`o?FlIti&Z<~{G.;vb`kޑү;o1_u?؋v߶1d(p!*` 2 Vnc|+=kbׯwt11xow oy^1畝}z\?4v`;'>O;S%0CU$!*jeܖ;aM߾tmo^;?Š~޾qo`/߭{;o{hQSkwMm ` fZ#7yw߹f؟ZӋ7}^OlGYnś^9~`?ou ܶf3XEB ;+9쏅^*lˁ[Y,GGN_zz{`nkx57K{>o,Kߺz]ȧ>Z='gſ{_߬-n[3XE"VȅiB?nĘ#.Lje큽\{W/؟{Fwb o}طo^=?l IDATqwWo[ۿ>D7 ܶf3XEB KuaM9j`w#߱=o|u}b$;;H|p/nm+W{ͫ?Z/~]Ǔ|f$pa)p!*` 2 1='cDC@&]aM9j`w#|AK/9>FdQC_n>3eZ=d73Og>YӋ?ŤY?N-" VP+`{^ Gt^\O~$aM9j`w#|ۓzo`}"{ﱴxtݯ}O?yW\W~R-x}a9aP%0CU$!*jea|`/f/~]{?^Kv^ȱuXcطYopsw_طYl vOu3Obد\O}i9P魟Nz{/FOER]ۖ V`Ha0ex`ȍf"0G\Lj|a5~Tw;z`>Fs|uoK{{hZ3u~hytw`߾ӷ?VGۖ V`Ha0e}W7M?~KEѿ#aM9z`>FVwo6{G[Lj|voW ݁zf8/?o3mٳ׃?؟m ` fZSv}~y'=ϐ:1G LjfpoFֳ_`߻7yvpӽcB>;oၽ|<f`ߺ}g{Hۖ V`Ha0ekb7~ތ^?zw`7K݁U{/CKg:_=׿u;y}s`mK`H0CU$0#nw7 OwO_sv`~1{)3׿];OQ%0CU$!*jeLYvW^>;>v`WO) 僫_sX[n-o'i) ܶf3XEB )nثgy6ہͯ9R{oM;_p5{o>Ȧ&}Sx' ܶf3XEB )nثLjtq[O̷_s1"uFۖ V`Ha0e {l>mn75Wy6{*li^3cO/`vyv~] ܶf3XEB )n9^ ?Q֋,g[/4M >?z?;/9_f`_`f>Zۖ V`Ha0e 0G؛g 6{"i=p`/͚կ_grJ7#orJf`o^Hf{`?}cm ` fZSցsہm3l[v!܋] .}3/ҟ,7݁7O,3?ħ3Uw_ I ܶf3XEB )n9bg`{Ybts{}dgWOw;|?Ѓ{{9_o7/65W?^S_m\Ss_7:OQ%0CU$!*jeLYv;{`^/LY^>z{^֓`<\o}헙'~<[oScm ` fZSցs^=d-7ks{p׬lSܾUOT_Ϳ^~S ; hrV] ܶf3XEB )n9bw`߫L788Ͻ!~0E:V?="V?ͷݼO=۝{jZ3?a$Cۖ V`Ha0e  ]92kmK`H0CU$0Ҏ7xaϾ& ܶf3XEB )n oؕI̾& ܶf3XEB )n lݡmK`H0CU$0絰g_ n[3XE"Vu`7szV {5P%0CU$!*jeLYv9Gt`Nn[3XE"Vu`7sDT%0CU$!*jeLYv9Gt`Nn[3XE"Vu`7ră >9kmK`H0CU$0MY==d(p!*` 2 {{5P%0CU$!*jeLYv&kmK`H0CU$0a`_Ğ}M2m ` fZSց@O$Cۖ V`Ha0e d}} {5P%0CU$!*jeLYvYGs`_ž}M2m ` fZSց@g_ n[3XE"Vu`7vgחg_ n[3XE"Vu`7v?/yaϾ& ܶf3XEB )n vv/waϾ& ܶf3XEB )n 7/uaϾ& ܶf3XEB )n  ]سI-" VP+`:9G\سI-" VP+`:H9' \سI-" VP+`:H9IkmK`H0CU$0M96ȁ} {5P%0CU$!*jeLYv f8;/oaϾ& ܶf3XEB )nM}M2m ` fZSցv7=/naϾ& ܶf3XEB )n`#vfkmK`H0CU$0&?[)p!*` 2 ɏ8l`_ž}M2m ` 
fZSցGSe-$Cۖ V`Ha0e L}kmK`H0CU$0&>dv`Gۖ V`Ha0e L|kmK`H0CU$0&>☁}A {5P%0CU$!*jeLYvq{0;գmK`H0CU$0=⸁}9 {5P%0CU$!*jeLYvqg//daϾ& ܶf3XEB )n`7Q%0CU$!*jeLYvؗg_ n[3XE"Vu`70} {5P%0CU$!*jeLYvq*;գmK`H0CU$0;"5/aaϾ& ܶf3XEB )n`#Xl(p!*` 2 Ɏ XسI-" VP+`:lmK`H0CU$0:⡡υ=d(p!*` 2 ~`؁n[3XE"Vu`7q} {5P%0CU$!*jeLYvmK`H0CU$0#N=qaϾ& ܶf3XEB )n?b2v`Gۖ V`Ha0e G~`ž}M2m ` fZSց~D)p!*` 2  kmK`H0CU$0#bzm ` fZSց}Ĕ}v {5P%0CU$!*jeLYvmK`H0CU$0# s[سI-" VP+`:ػQ%0CU$!*jeLYvW5laϾ& ܶf3XEB )nM}M2m ` fZSցZسI-" VP+`:`v&v`Gۖ V`Ha0e Gt`k-" VP+`:`8jaϾ& ܶf3XEB )n=[m ` fZSց{́}N {5P%0CU$!*jeLYvmK`H0CU$0#ޱ}F {5P%0CU$!*jeLYvmK`H0CU$0#&YسI-" VP+`:@pn[3XE"Vu`7с ܶf3XEB )n<]{gg_ n[3XE"Vu`7@с ܶf3XEB )n,$Cۖ V`Ha0e 8 I-" VP+`:}& {5P%0CU$!*jeLYvs ;գmK`H0CU$0#:5Fۖ V`Ha0e Gl2caϾ& ܶf3XEB )n<[cm ` fZSցx6,kmK`H0CU$0#:5Fۖ V`Ha0e pG{ O3aaϾ& ܶf3XEB )n;[m ` fZSցwDF)p!*` 2 ,$Cۖ V`Ha0e pGt`k-" VP+`:xj`_سI-" VP+`:8n[3XE"Vu`7pu @-v=d(p!*` 2 6a5P%0CU$!*jeLYvlkmK`H0CU$0} n`^سI-" VP+`:Hn[3XE"Vu`7@ng_ n[3XE"Vu`7@с ܶf3XEB )n:[#m ` fZSցuž}M2m ` fZSցtģ6[= ܶf3XEB )n:btaϾ& ܶf3XEB )n:[cm ` fZSցt3-kmK`H0CU$0#:5Vۖ V`Ha0e 0G ׁn[3XE"Vu`70'āg_ n[3XE"Vu`7pum8a:գmK`H0CU$0؄$Cۖ V`Ha0e u`.$Cۖ V`Ha0e 8 I-" VP+`:}6oN]سI-" VP+`:{S&[= ܶf3XEB )n`Ol(p!*` 2 GkmK`H0CU$0']zm ` fZSցvž}M2m ` fZSց݀0d(p!*` 2 qV-" VP+`:wng_ n[3XE"Vu`70]zm ` fZSց#6vQ%0CU$!*jeLYvxm1skmK`H0CU$0V-" VP+`:wӶng_ n[3XE"Vu`70'N]zm ` fZSց#8uQ%0CU$!*jeLYvxm33kmK`H0CU$0FĥV-" VP+`:ung_ n[3XE"Vu`70']zm ` fZSցյġV-" VP+`:3O {5P%0CU$!*jeLYvlkmK`H0CU$0؄$Cۖ V`Ha0e/~{챿ӗ{¿lԝl`Og_ n[3XE"V``[yӏ'?ԙV-" VP+`ˏ>{Sgn=ž}M2m ` fZS_yG⇯;1x w`pu`Gۖ V`Ha0eY ZL`C]aN^l(p!*` 2 ,=y/G?~<p`Og_ n[3XE"V:z(zԁ=#ׁn[3XE"VCG߭kb#xm9]سI-" VP+`{_~챿d؋h|kb#xu`Gۖ V`Ha0eၽxz; -g{7[= ܶf3XEB ) 쯼7Ok׫{n_Ԝqg[%I/}`/~ƽx{'[$)?; {CeWW׆'[$)W^?@ā=_>ql$IgҁD§[i-ym<̾& ܶf3XEB )3ρ= paϾ& ܶf3XEB );;Xy`?ׁn[3XE"VM}K8ہ=ׁn[3XE"Vov^ϻ~lcܟ7fOt {5P%0CU$!*je,bx`o`zG<}:գmK`H0CU$0)Cnҁ}ooQ%0CU$!*je bz`wf}ׇ{=d(p!*` 2 ,=y[;[zm ` fZS؋Yw~WC T {5P%0CU$!*jeLY|`/p`0`:գmK`H0CU$0Z7-m8`:գmK`H0CU$0 /~_~ͤv`o| D {5P%0CU$!*jeL)xÚ#l(p!*` 2 kqV-" VP+`::s,$Cۖ V`Ha0e 8 I-" VP+`:(7'YسI-" VP+`:z!ցn[3XE"Vu`70C[= ܶf3XEB )n`LS,$Cۖ V`Ha0e =eV-" VP+`:z!ցn[3XE"Vu`70C kmK`H0CU$0qаu`Gۖ V`Ha0e gm`?52Q\ n[3XE"Vu`7p3>-$Cۖ V`Ha0e ~F8kmK`H0CU$0N?IV-" VP+`:8'Y }=faϾ& ܶf3XEB )n#ߴ4 {5P%0CU$!*jeLYvqu`Gۖ V`Ha0e \];l(p!*` 2 Lm ` fZSց@с}p_;*p!*` 2 ۟`-" VP+`:8#F[= ܶf3XEB )n#l(p!*` 2 8b:գmK`H0CU$0N>Q{NWr<;n[3XE"Vu`7pGlڳ {T#V ܶf3XEB )n#l(p!*` 2 S8fҞ]RT%0CU$!*jeLYvq̢=}vT#V ܶf3XEB )n#6;{Q)Z)p!*` 2 S8fΞ^aT#V ܶf3XEB )n#g>G,$Cۖ V`Ha0e z1sցn[3XE"Vu`7pu3gQ%0CU$!*jeLYvہ=|aϾ& ܶf3XEB )n>n[3XE"Vu`7 {5P%0CU$!*jeLYvq̜u`Gۖ V`Ha0e z1sցn[3XE"Vu`7pGCkmK`H0CU$0N<9V-" VP+`:8[= ܶf3XEB )n#0.$Cۖ V`Ha0e xQsցn[3XE"Vu`7pGYzm ` fZSցG5g/a`[سI-" VP+`:8[= ܶf3XEB )n#l(p!*` 2 8j^g_ n[3XE"Vu`7puw5gQ%0CU$!*jeLYv݁=daϾ& ܶf3XEB )nഁ=n:գmK`H0CU$03Q%0CU$!*jeLYvqܜ=`aϾ& ܶf3XEB )n#l(p!*` 2 ӎ8n:գmK`H0CU$0N;9{)=d(p!*` 2 ӎ8n:գmK`H0CU$0N;9V-" VP+`:8 kmK`H0CU$0N;9V-" VP+`:8[= ܶf3XEB )n#3O]سI-" VP+`:8#[= ܶf3XEB )n#4O\سI-" VP+`:v;l(p!*` 2 Lm ` fZSց݀>maϾ& ܶf3XEB )n>n[3XE"Vu`7pGYzm ` fZSցIG9g/j`g_ n[3XE"Vu`7pGYzm ` fZSցIG9gQ%0CU$!*jeLYv'q䜽}ž}M2m ` fZSցIG9gQ%0CU$!*jeLYvqu`Gۖ V`Ha0e rıs {5P%0CU$!*jeLYvqu`Gۖ V`Ha0e rısցn[3XE"Vu`7pK/$Cۖ V`Ha0es`9{qu$I#9vlI\9[b}7~kmK`H0CU$03Q%0CU$!*jeLYvl(p!*` 2 S8v^>vaϾ& ܶf3XEB )n#l(p!*` 2 S8v:գmK`H0CU$0N89{ȅ=d(p!*` 2 8z:գmK`H0CU$0N89V-" VP+`:8ሣ%kmK`H0CU$0N89V-" VP+`:8ሣ[= ܶf3XEB )n#9ZسI-" VP+`:8ሣ[= ܶf3XEB )n#l(p!*` 2 kG9{=d(p!*` 2 Lm ` fZSց݀ہ}& ܶf3XEB )n=~^>baϾ& ܶf3XEB )n#l(p!*` 2 8~:գmK`H0CU$0?9{=d(p!*` 2 8~:գmK`H0CU$0?9{΅=d(p!*` 2 8~:գmK`H0CU$0?9V-" VP+`:8kmK`H0CU$0?V IDAT9V-" VP+`:8[= ܶf3XEB )n#"0g/z`.$Cۖ V`Ha0e_G>棵kbGl(p!*` 2 1 4:}D`^[سI-" 
VP+`7iĎ>"0gQ%0CU$!*jeLٻG ;kbGl(p!*` 2 1"n6?O;kbG>{kmK`H0CU$0=M Ou3xH&v9V-" VP+`s_&v9{;^سI-" VP+`ʞJyCE|aM#sցn[3XE"V=ҟӽM&v9V-" VP+`ʞ>v؃Z5cHًž}M2m ` fZSԁyʾMÚ"cޏl(p!*` 2 I#7%xr"/Úہ}& ܶf3XEB ){~כY=9;GMZ51);ZسI-" VP+`9]uXs`;Dۖ V`Ha0e؝טy[O5̔>uX;ĜkmK`H0CU$0w_=/Wρ=G$[= ܶf3XEB ){^l5,e}`/$Cۖ V`Ha0e{LϾ~Óz[&v`-" VP+`O^?6vNkĎ;"3gg3kmK`H0CU$0EdrÚqGd[= ܶf3XEB )n#2sցn[3XE"Vs`/(+l>^&v9;g_ n[3XE"V{`:wDf:գmK`H0CU$0;"3gQ%0CU$!*jeLh`}8`3; {5P%0CU$!*jeLh`ncz3c&v9V-" VP+`:8̜u`Gۖ V`Ha0e wDfi`o/$Cۖ V`Ha0eÁ~G؃wDf:գmK`H0CU$0>H[ {5P%0CU$!*jeLYv^̜u`Gۖ V`Ha0e 59V-" VP+`:p`݅=d(p!*` 2 Lm ` fZS/}|aM#Bsցn[3XE"V=f`'~ݔ~ߘٙ ž}M2m ` fZSEef^>~DiĎ:"4gQ%0CU$!*jeLٻ37 9ѡaM#BsvnvaϾ& ܶf3XEB ){^T Y/=NJsuDh:գmK`H0CU$0w_^NfJ:uDh:գmK`H0CU$0wm~~}ͮ~|GakbG7 {5P%0CU$!*jeLٻCnqt"el'iĎ:"4gQ%0CU$!*jeLٻ U|`jV/sakbGl(p!*` 2 1{oMā=1GfaϾ& ܶf3XEB ){}{cHYzm ` fٻ%Lӱ 1nEi$g+*1DJn QTD#Uh)٤."1(]rB',nc#9O}rf[EB ){|zU 2\"5g'/nR:5%0CU$!*zeL٣Cίv`? 8o9 N[3XE"W=1}77^Яrk0D48m ` f^S/w"ҙ_4{Z$CӖ V`Ha0egD~O8'iK`H0CU$0ח^}GcS`\"5gڣiK`H0CU$0¾ի }@dKU_58m ` f^S}9;g yo/]T51HYhp!*` 2 1{˿;9 N[3XE"W}P`\"5g/iK`H0CU$0\"fڧiK`H0CU$0G\"4f/Af3XEB ).p%2[[{58m ` f^Sȁ?u7GcuXq̖}^ N[3XE"W=f`Sgۮuf;-bށiK`H0CU$0G %Ddʮ8GӖ V`Ha0ewvkG\"2eW/I-" V+`7_6}ԁh<b^r`k-" V+`ؗo`a(a v`-" V+`/4q]́M $CӖ V`Ha0e _b ~ǩk0D48m ` f^St>NuXq N[3XE"W=f`?!2 s%CEʋ5-" V+` vl%0CU$!*zeL٣>EgY/q19شf3XEB ){`Gk×m^MiK`H0CU$0}c[a v5;vaCf3XEB ){x`!ha v9;r^)58m ` f^S}eWom揉%k×Yhp!*` 2 OC?Z:_"7g[څx N[3XE"W=_{蟾?~|g͛ϟuBuXDp:GӖ V`Ha0e/ū|e?~ws{DuXDpN7&GRӖ V`Ha0eue޹_loo_[no`o/줗mF N[3XE"W/_[/^w_}1{?!_D48m ` f^>~`_.W//]sѿXп۩vgv˟|ہ MiK`H0CU$՞зɫw/9ЏwՂ?~ +ܚr`o.Iڔf3XEB ;Wہ}56۷wOgD́p`Mo^58m ` f^>v`w|h$x {m=Q;@o^58m ` f^+^O~6 |{~qw%;[{48m ` f^S^L㳯~6/rqCw~s%;Z~-" V+`wzm`_7`>{ ߺs3F]zI$)?E}D=bo9%Ir3Ť^{(7=r9 g;o+V–$I'g>rbYtf3XEB {p`| n`?xԯU5ءK\>b.Gjp!*` 2 V}v)ہ=_񭏛%.ްkڣiK`H0CU$0[gn~q>{kCxîq`k-" V+`ؗ?x#sW_x7|J3ӿ0.q]d`iK`H0CU$0G zvO^!KX:a9GӖ V`Ha0rƼ~%x>deM"_ָ9'H%.ڣiK`H0CU$0G 싉w?}p 7ψ\ {P&{ګI'H%. l48m ` f^ȁo}UcVx _˵7T5ءK\`78GӖ V`Ha0կgͭ7~b^??]`.q N[3XE"W[oP= ߷kpnj3]؏%0CU$!*zeLY~`_~ׯxf^;p=l%0CU$!*zeL#O7>o?Ͽ7j|BvuXe>ۅ N[3XE"W=4W#:VAE>hp!*` 2 bgOs`;ODӖ V`Ha0e_l}ջFa 2]%0CU$!*zeL}Ikkgcs`;ODӖ V`Ha0e }_~^g{,P`.qva?XӖ V`Ha0eO^P?V,T5؁K\pBW`.qρ=f3XEB )7w녽o0a v-Z lC58m ` f^S6ŏZ>\dSa v-ڣiK`H0CU$0HuXEsK~-" V+`F93a  -l%0CU$!*zeL};| dЇl%.[ڣiK`H0CU$0^ށ=l%.[ l48m ` f^Sv7޿$T5K\4<8GӖ V`Ha0e_ṥva?HӖ V`Ha0e_Ł=f3XEB ).*o -l%0CU$!*zeLYv~-" V+`: 8'iK`H0CU$0ۅ N[3XE"Wu`hp!*` 2 K\;9GӖ V`Ha0e ؏m  x N[3XE"Wu`{v6hp!*` 2 K\;u.58m ` f^Sց]`%.ڝڣiK`H0CU$0~OoӫzMuXEs[{48m ` f^S6صl%.ڝZlf3XEB ).ml%0CU$!*zeLYvhwn9]Gjp!*` 2 K\;9GӖ V`Ha0e^ݹ́=f3XEB )./mM 8 N[3XE"Wu`p`;ODӖ V`Ha0e%0CU$!*zeLYv}{ܡvaiK`H0CU$0.q=f3XEB ).l1f3XEB ).ڣiK`H0CU$0.q=f3XEB ).lf3XEB ).ڣiK`H0CU$0.qܡva%0CU$!*zeLك7 k}hv N[3XE"W=8ɳ~ Gk}hv N[3XE"W=f`=|wf -" V+`7/|𳉏T5ؾK\4;wp`k-" V+`~g7>h?T`.q=f3XEB ){̧ӷc&Aa J2Esۅ}HӖ V`Ha0e?~HuX؋V.l%0CU$!*zeL٣?7^د|eƮk0vva%0CU$!z:/K IDAT*zeLه|/|?6́> N[3XE"W}79˷sV.l%0CU$!*zeLهUn7/k=hu|`kp!*` 2 ^{QsV.l%0CU$!*zeLه쵏!?X`{.qܥva%0CU$!*zeL ޮ~Ok=hu N[3XE"W=~`뛯p-ґsV.l%0CU$!*zeL#~}s/4-sV. {-" V+`3׾cg7><~HGl%.Z8S_ڧiK`H0CU$0?rhGsV.l N[3XE"W=8WhCV#R`{.qŁޣiK`H0CU$0G{W ŗhtv`%0CU$!*zeL٣G_}kW5X|FNl N[3XE"W=<.l Es'{-" V+`>T5;w`C N[3XE"W}~AU5ہ}"f3XEB ){OG}ߨU`lhp!*` 2 Q?xG}/qɁ>! N[3XE"Wu`/qɁ>! N[3XE"W}~́D|FNK/%0CU$!*zeLp`ӯ]}K\4:wr`_}-" V+``s'I:K\4:wr`_}-" V+`;#C/qɁ}%~2f3XEB )W,Es'Whp!*` 2 rw/pCzw9wkΝr_⧢iK`H0CU$0,Es7hp!*` 2 @x6nkɯ%0CU$!*zeLك{ȏ| /q́}#E~f3XEB ){x`Wkm7_䧡iK`H0CU$0a6n/%0CU$!*zeLh`揯7&Gs`W l N[3XE"W6F73́> N[3XE"Wu`p` l-" V+`: hs2? 
N[3XE"W _{߹.u`?Zx6n;/S%0CU$!*zeLY?E@x6n5ɯ%0CU$!*zeLYvmk_'iK`H0CU$0%.ڜ9$O@Ӗ V`Ha0eK\49uٯkp!*` 2 @t&g.>y N[3XE"WM{M]p`o~O]Ӗ V`Ha0e}^~Ehr_S%0CU$!*zeLYvM΀{CK}f3XEB ).]p`o~O\Ӗ V`Ha0e}^~Ehr؛_%0CU$!*zeLY?E&g)>q N[3XE"Wu`p` l-" V+`: 8'iK`H0CU$0^%0CU$!*zeLYvM΀W58m ` f^Sv79^~Yc~Ehr[_S%0CU$!*zeL}}Qb9؏]p`o~OYӖ V`Ha0eK\8#- kp!*` 2 @pgā-?] N[3XE"WM_}x%.Zt58m ` f^SO)\q`-" V+`: hqF;'iK`H0CU$0%.Zjp!*` 2 @pgāKK~f3XEB ).\q`-" V+`;_ a \q`-" V+`7O~[a Jq/ZNjp!*` 2 1%3.vuX9 JӖ V`Ha0eコ5_D48m ` f^Sվ~z뭷Ẃ> N[3XE"W=8_~-o.GuXhqF؁W$58m ` f^S~rNa}9}uلޯ:v_r`_S%0CU$!*zeLكk%}gȁOPӖ V`Ha0e ˷?wvuXE3'iK`H0CU$0AV?$R`/q 9# jp!*` 2 1{kKCRuXE3iK`H0CU$0<"E3iK`H0CU$0GgW }gȁONӖ V`Ha0eczFU5K\CX+rf3XEB ){|67:v_⢭r`ҟ-" V+`ؗ;kWev_ⲭq`ҟ-" V+`ʆ_߽x?]|:v_ⲭq`ڟ-" V+`Fyګw)"[ bOk/Ixl$Ii[ bāWRf3XEB ).v`-" V+` a Nŀ{48m ` f^Sց]`%.9+xOIӖ V`Ha0ev^⩸48m ` f^Sց]`%.;9(xOHӖ V`Ha0ev^򩸓48m ` f^Sߍ2y˧NC p:f3XEB ){[k~Տ[`%Sq!8 N[3XE"W=b`yTŁ}PANFӖ V`Ha0e+g؏-" V+`_MGyT}XANEӖ V`Ha0e_|㍋x#>#hIl%"Sq -" V+`jO.~_ODd*ns`VPS%0CU$!*zeLك{?jibˇ?XuX,G(ihp!*` 2 졁K7kzҾط́> N[3XE"W=4WoW?ͯ?:k®k0 iK`H0CU$0 Yz/z`w(a v`-" V+`?W[_=uR>JE NAӖ V`Ha0e6 Y1l%RK>Q*jp f3XEB ){9^o}1"v]"s`'iK`H0CU$0G}Lլ{zv]"q`'iK`H0CU$0jV_'luRǁ}-" V+`U||#6l%bK*PiK`H0CU$0>;؏~_?}lduR>RE58m ` f^Sfˇ~uk]-McU4\Ӗ V`Ha0e쫇D.߰~n`}{f%KqXM(%0CU$!*zeL#/|_M죕tXӖ V`Ha0e/GW[ZkH 죕tXӖ V`Ha0e^ka ɡ}.kp!*` 2 v[+)C-" V+`: 8'iK`H0CU$0 vӖ V`Ha0e/.g/ہ}"f3XEB ){G>џ!2 ١Ɓ%u%0CU$!*zeL٣>Sg>6]qP~:Tjp!*` 2 W{;ȎU5؎KCԡRӖ V`Ha0e_\gz|}:v\"<8f3XEB ){p`[߾\>0zqP~>jp!*` 2 ^rr? _uXDz(r`?LI!48m ` f^S!ş}bɀ>:v\">o8uf3XEB ){h`_]{觰lxˁ@5%0CU$!*zeLC{n {kKw TJӖ V`Ha0e[[zlxÁ@5%0CU$!*zeLcq`ö/߉7UӉ" N[3XE"W=?zDdU5%؉UӉ" N[3XE"W=c]U`O>;jJQiK`H0CU$0k[ɷkś> N[3XE"W=U8'iK`H0CU$0wo?^~GGk0IN iK`H0CU$0{o۾!;jZQiK`H0CU$0؏}Cv%#ԢBӖ V`Ha0e/qN\q`?BM-*48m ` f^Sց]`#ԢBӖ V`Ha0eH۾1;qŁ5(%0CU$!*zeLYvK4W؏QT| N[3XE"Wu`غA3qŁ(EH%0CU$!*zeLYvK5_8f3XEB ).uf #5#]Ӗ V`Ha0e?+(a uf cU#[Ӗ V`Ha0eؿ2~cW5%*F-" V+`3_~g`Ċޣ:.qLt`?ZQ758m ` f^SK[2o9YEfiK`H0CU$0;gꭿ#Ẃv%0CU$!*zeL|G77^]'k0D48m ` f^S'/'T5)  iK`H0CU$0{GXezۺā+сhEH%0CU$!*zeLكkcG:.qJt`?^Q=258m ` f^S^7ca uGꑩiK`H0CU$0w!:_ȑ~~$jp!*` 2 K9 T$OӖ V`Ha0e_ȍ^ yf3XEB ){3[诚k8r#:jH-" V+`)"_ȉ^!if3XEB ){`{ {%E*iK`H0CU$03"hO'Dfa_ȅ^"Yf3XEB ){p`__}/Kφ= sG.D2UI%0CU$!*zeL^{㵏\zӿ0/q@t`/SՑ$ N[3XE"W=b`쾏+z[EсPUIr48m ` f^S}'k0D48m ` f^S}?x:%0CU$!*zeLʻot;:Oh`Ͻf3XEB ){x`?vѪ%쥪ZiK`H0CU$0G}+>`a vCbe=I%0CU$!*zeL٣*n>ZuXݻġЁXYO48m ` f^S'Ξ}Ǫk{8t:++x N[3XE"W=f`;p>VuXݻġЁ\YQkp!*` 2 졁KwCǡP֔f3XEB ){9~x'_ok{8t:eMiK`H0CU$0?k{8t: eUiK`H0CU$0G|?|k7G]`.q4t`ʪ2ZӖ V`Ha0e_Wva vNC6+58m ` f^Sy^%]lDYWkp!*` 2 K ،%0CU$!*zeLcG[?(k: ،%0CU$!*zeL#~ȱ@uX9ta78m ` f^Sց]> N[3XE"Wu`cwRחf3XEB ).ycgRחf3XEB ).ycgSWqf3XEB ){``o|+#qۼıЁ+8 N[3XE"W;Oط|XuXm^Uf-" V+`/2gtAP`8v:9uiK`H0CU$0~ͯnkK; ؜ %0CU$!*zeLx`s=_{cG>a ycGTWQf3XEB )__^>ukK ؠʌ%0CU$!*zeLp`X˯*kK ؤ %0CU$!*zeLp`&omd IDATw~vSa yc'TיAf3XEB ) q\{=ZuXm^IFՕf-" V+`τvuXm^EFՕf-" V+`F{Ls_lx.B65C48m ` f^S؁}cU5<جڌ%0CU$!*zeLYvؓ--" V+`: 8'iK`H0CU$08x:i58m ` f^Sց]`A68-" V+`: l\96%0CU$!*zeLYvKظf3XEB ).qנWXZӖ V`Ha0e6.qt` Ckp!*` 2 % kp!*` 2 쾁е]`8x :(iK`H0CU$08x :(iK`H0CU$08x :G(,iK`H0CU$08x :G(,iK`H0CU$0~OoӫzMuX_)>-" V+`FVuX_^!*Cjp!*` 2 YvӖ V`Ha0e%0CU$!*zeLYvؓ,-" V+`: _@-" V+`: _A-" V+`ʎ?'_7[3Ё=Je0 N[3XE"W0+q}#lG@( 48m ` f^S?~/3lG@048m ` f^Sؿzs/~j}/WlG@048m ` f^So_]j5p/~:/q t`S!HӖ V`Ha遽Z7K|kA_{~[a ~G{Af3XEB AL쟬?{{_ߪkK=U%0CU$!*ze bz`X[ψs`}E-" V+`{s4oa}nkK#ֈ%0CU$!*ze\x`Uo?nq{lO@H5"48m ` f^+?xw;R'/O@H5$I'n`o}{P=$IkJ`;Gp`UZ$It'==Vi$II:GD.u"?v{&-%0CU$!*zeLٚg{/%JXӖ V`Ha0eK_KأVi-" V+`V|O_K>أVi-" V+`&ǻx_aZuX]K 58m ` f^So>?\[ρ=\ijp!*` 2 ,=ߴ^=/$j_x[{SuX]L48m ` f^Sث_yϷq|׳0w8~9ǫm" N[3XE"W걐U]痛}= swǟ;Amhp!*` 2 ,>W/ᄍꡑχO]NP[%f3XEB )7o\?,l]k:.qs`g N[3XE"W\?ؗ_v`;OEӖ V`Ha p;o م_~z-" V+` U5ہ}"f3XEB 
).n`?Յ%0CU$!*zeLYvD48m ` f^Sց]?vJ=VӖ V`Ha0e.qs`g#58m ` f^Sց]>vN=RӖ V`Ha0e.qs`-48m ` f^Sց]V;MqiK`H0CU$0yk N[3XE"Wu`ĄS\hp!*` 2 %&>v^=BӖ V`Ha0en/1a9f3XEB ).p{ ρX%0CU$!*zeLYvKL|Lz-" V+`: ^bs`*nփ58m ` f^Sց]/;UqiK`H0CU$0iS[ N[3XE"Wu`p`;ODӖ V`Ha0e'f3XEB ).p{ {ρ[%0CU$!*zeLYvKL{lz-" V+`: \bs`gn׃48m ` f^Sց]3;]uiK`H0CU$07 N[3XE"Wu`ČWݯhp!*` 2 %f=viK`H0CU$07 v-" V+`: \bs`Wnf3XEB ).psSρ]aGkp!*` 2 %fL=viK`H0CU$07%+v-" V+`: ]XόQݱ#58m ` f^Sց]> N[3XE"Wu`p`?np!*` 2 Ny"%;NӖ V`Ha0e/1e9T( N[3XE"Wu`ĔRݲ48m ` f^Sց]SfLu͎%0CU$!*zeLYvKLYy25;FӖ V`Ha0e/1e9T N[3XE"Wu`ĔSݳ#48m ` f^Sց]SFPukp!*` 2 %l<v%0CU$!*zeLYvKLxJM;iK`H0CU$0ח2إvHӖ V`Ha0e/1e9KUW-" V+`: \_bs`ת N[3XE"Wu`8Ūv@Ӗ V`Ha0e}/-" V+`: 8'iK`H0CU$0FvӖ V`Ha0e.1g9Um-" V+`: \]bκs`> N[3XE"Wu`ĜuWݷ=f3XEB ).pu9΁]o{48m ` f^Sց]sƝT.%0CU$!*zeLYvKvP]XӖ V`Ha0e.1g9OAuB N[3XE"Wu`Ĝi>Ս 58m ` f^Sց]s$TW.%0CU$!*zeLYvKLZvPݹ@Ӗ V`Ha0e./1i9OCu N[3XE"Wu`Ĥa>եۭiK`H0CU$0g؋C،-" V+`: 8\ N[3XE"Wu`p`;ODӖ V`Ha0eI.-" V+`: \^bҪs`%0CU$!*zeLYvKLZuQݻf3XEB ).pyI΁}:{CӖ V`Ha0e./1i9OHu58m ` f^Sց]6To[Ӗ V`Ha0e./1i9OIu48m ` f^Sց]`uY΁}JiK`H0CU$0K̚tR]f3XEB ).ĬE>)ջiK`H0CU$0KZtRݽ{f3XEB ).ĬA>1%0CU$!*zeLYv%f9.ߦ-" V+`: ]sSSݾ N[3XE"Wu`p`;ODӖ V`Ha0e؝I--" V+`: 8[SZ N[3XE"Wu`X]b֘s`ip!*` 2 ƜUNӖ V`Ha0eV'wf3XEB ).Ĭ->E %0CU$!*zeLYv}>M%0CU$!*zeLYvs448m ` f^Sց]܁}`np!*` 2 ہ}f3XEB ).pn?Oda78m ` f^Sց]܁.-" V+`: ;اf3XEB ).pv`np!*` 2 م>Y5\ip!*` 2 ہR0` f^Sց]T]DO V`Ha0eKE0` f^Sց]rt`_naH0CU$0ځ}Ҫ)A!*` 2 ہ}ӟf3XEB ).v`(-" V+`: 87ʫ8iK`H0CU$0l*N!*` 2 ہ}ӟf3XEB ).v`)-" V+`: 8kj8iK`H0CU$0l*N!*` 2 ہӟf3XEB ).v`o-" V+`: ]n' N[3XE"Wu`p`;7UiK`H0CU$0l=UUlp!*` 2 ہ}OUf3XEB ).3'f3XEB ).v`WUO[3XE"Wu`p`;UqӖ V`Ha0em5U%0CU$!*zeLYv{[M?m ` f^Sց]ޡf3XEB ).v`RQO[3XE"Wu`p`;w-" V+`: 8;TqӖ V`Ha0e 8iK`H0CU$0lnU%0CU$!*zeLYv~R%0CU$!*zeLYv ÝdWiK`H0CU$09'v`G)Is`p`;#ݔ$I˝~0;[-" V+`: d5[-" V+`: d5SZ-" V+`: d5Y-" V+`: d5Y-" V+`: d5ӓX-" V+`: d5ӓX-" V+`: d5W-" V+`: dn5W-" V+`:n5SO[3XE"Wu`Kj( f3XEB )Η:ORV?m` f^Sց/u9.2!*` 2 ;ߋ YĆ4%uqӖ V`Ha0er8i`H0CU$0|lR8i`H0CU$0|lR8i`H0CU$0|3́detqӖ V`Ha0eRWJ-" V+`:4ӕO[3XE"Wu`Ki'l|?m` f^Sց/u9]e0CU$!*zeLYvԉ~ʆwqӖ V`Ha0er'I]O[ 3XE"Wu`]hmp?m)` f^Sց.w9e0CU$!*zeLYv܁~ƖqӖ V`Ha0erZO[ 3XE"Wu`˝g'od?m)` f^Sցŋ Ý,-" V+`:93ӟf3XEB )Nv`?и2NR!*` 2 ;ہ@8iKaH0CU$0t́=ae0CU$!*zeLYvi¨2NR!*` 2 ;]2s`aP?m)` f^Sց-y90f3XEB )Ζ<ؓO[3XE"Wu`gKeY i-" V+`:%2,qӖ V`Ha0eْg{#8iaH0CU$0lɫ́=m0CU$!*zeLYvQO[3XE"Wu`gKdm0CU$!*zeLYvM O[3XE"Wu`gIg&{*tg?m9` f^Sցہhtg?m9` f^Sցہxpg?m9` f^SցہO[3XE"Wu`'c٠u%1CU$!*zeLYv9 ZO[3XE"Wu`'˞cu%1CU$!*zeLYv5XO[3XE"Wu`'cu%1CU$!*zeLYv1WO[3XE"Wu`'bau%1CU$!*zeLYv)VO[3XE"Wu`'˞b)Qu%1CU$!*zeLYv%TO[3XE"Wu`'b9Au%1CU$!*zeLYv!SO[3XE"Wu`'[m Y;́=+$f3XEB )u9>}ڲ!*` 2 ;4r`;D>mY` fV IDAT^Sցr9s,f3XEB )Ε>8ibH0CU$0\#́=}e1CU$!*zeLYv >}ڲ!*` 2 ;Ws`lq>mY` f^Sց+}9s,f3XEB )Εs[ǹO[3XE"Wu`J`-ܧ-" V+`:s/qӖ V`Ha0eع{v8ibH0CU$0\ˁ=%}e1CU$!*zeLYv߂BN}Ҙ!*` 2 ;Ur`oA!>mi` f^Sցj]__ȩO[3XE"Wu`Cl6х1CU$!*zeLYv9v`[ȩO[3XE"Wu`ZCl6책1CU$!*zeLYvᑅ1CU$!*zeLYvnq1CU$!*zeLYvQ1CU$!*zeLYvn11CU$!*zeLYvn11CU$!*zeLYv1CU$!*zeLYvnᅜ1CU$!*zeLYv1CU$!*zeLYv1CU$!*zeLYvn塍1CU$!*zeLYv偍1CU$!*zeLYvna1CU$!*zeLYv tv!ko9yX#g>my` f^Sցz9<3m` f^Sցbh9:Df3XEB )NT]UɉO["3XE"Wu`'Y춎ħ-" V+`:U,v[TrӖ V`Ha0e؉*V#*9iKdH0CU$0D#ˁJN|!*` 2 ;Qr`wvDf3XEB )Nt3{.dM,vg+9iKdH0CU$0D7ǁNrDf3XEB )Ntz,*9iKdH0CU$0D7ǁNDf3XEB )NTħ-" V+`:+vw{;9idH0CU$0<%ʁ޾N{2!*` 2 ;Oɺr`kO'=m` f^Sցd\9Lf3XEB )S2ڳ=m` f^Sցd[9ga{2!*` 2 ;Oɴr`E=m` f^SցdY9trӖ V`Ha0eyJ[vwrӖ V`Ha0eyJv[WvvrӖ V`Ha0eyJf[vurӖ V`Ha0eynمU֍e2CU$!*zeLYvۡv`伧-" V+`:V)=m` f^Sցn85J9iKeH0CU$045ʁ5K9iKeH0CU$045ʁuJ9iKeH0CU$045ʁ 2CU$!*zeL ?o~w1갆Slm(崧-" V+`?}׿_=륜2CU$!*zeLY~`o;[rӖ V`Ha0e7Wo]lݷViO[*3XE"W{ZM?O~u ;[[J9iKeH0CU$0^C~sEWtE[ʁm2CU$!*zeLYz`dm9ϟVo_v`'q`kVzr!*` 2 ,=^,gD~}[aP底\f3XEB ) 7/f0싁%zzVuX# )vn嬧-" 
V+`{gѿK7amܜ]RltYO[.3XE"W|[k`ӰmzBy9MTI;o-T IfR4 U I&5t`?5P؊<3XE"W=gT5@Մr`kIO[23XE"Wu`'ZPl3iKfIo` 2 l`Ka P5kӖ !*zeLx7MYoӖl;VIo` 2 ,<~wf;[To;VIo` 2 ,=ߴ^=/g;[T'o;VIo` 2 ,=3"+:Gv>Oݔwl!*zeLYz` ^ի'Oӌwts!*zeLYvCT'i;V9o` 2 ;E`r`AS4+ߔ7tf^Sցn/90g o:3XEB )NQ7zLx7?"Wu`[Kl=Tiyr&cƟ V+`:Sԍ%<5ޱMyOgHa0e)궒[V}\ X3CU$0u[Ɂ>/Ōw|Sә!*zeLYvrv!k*9io:3XEB )N^SW}b X3CU$0Ł>yG)o:3XEB )po8ا6O|w3!*zeLYv‘֣Ub;Vo` 2 ;CFr`17|f^Sցp"9@ycƟ V+`:3.$>;O|w3!*zeLYv}2Շ U`>3XEB )P8ZXf3CU$0 ȁɛU`>3XEB )NP9ZX&0CU$0ȁOЉUa3XEB )NP9T6„7f^SցZ9"T6„7f^SցXq`;CtʦcU_ V+`:*lT}Ntw  !*zeLYvKŁ~ɚUa3XEB )NP9TS5„7f^Sցr9ũ>I'j;V o` 2 {I,X%W0CU$0J[tfc_ V+`:+DlO)Ub3XEB )t9Ū>N'h;Vn` 2 {9R'f;Vn` 2 {)gv*1ߍ"Wu`WPX%W0CU$0م!LvǪ1ݍ"Wu`M짯\X50CU$0ᶧہ=u:&c՘_ V+`:^&l OɘUc 3XEB )v95N:ݱjLw/aHa0eN >\a;Vn%` 2 {Hէ$LvǪ1ݍ"Wu`W;xX50CU$0j[UzݱjLw/aHa0eծF>a&c՘_ V+`:G+?l W}ƪu*2ۍ"Wu`V}Ud 3XEB )x8Ud 3XEB )x8UUd 3XEB )x8Ud 3XEB )x8ՙUd 3XEB )m9zJS}֪u*2ۍ"Wu`c8s>lEc_ V+`:G1CIUsݱvaHa0e؃Z!l[XU&1CU$0[O\XU&1CU$0[ɪ\XU&1CU$0׎[٪\XU&1CU$0ǎ[]XU&1CU$0[]XU&1CU$0[]XU&1CU$0[%^XU&1CU$0챪g[E^XeW1CU$0챪W[e_XeW1CU$0챪7[uO_XeW1CU$0챪'[_XeW1CU$0챪[_XeW1CU$0v Y ǁJ0Lw2s!*zeLYX;GہF 0\7*f^Sց=v`Q}LtǪ3Ս"Wu`{r8؍T&cՙ_ V+`:6lc8Dw:S˘!*zeLYPƁSP}Ug3XEB )z8uOPݱLu/cHa0eCUNDQi;Vne` 2 {Uɨ>LtǪ3Ս"Wu`Ui:էqXu1CU$0쑪 NHye;Vnu` 2 {AR}"ǘUh3XEB )zμp`T!c_ V+`:G^3/:5ՇryXf1CU$0쑪 NNs*4Ӎ"Wu`T=e^8u%m;Vnu` 2 {`_]2l Uh3XEB )(lvsG5D7Bf^Sց=P.'i;Vn` 2 {h[8ت>iX&2CU$0쁪'̊[|bcU_ V+`:0+lJUi!3XEB )z8uª(c;Vn` 2 {)>iX&2CU$0쁪ˊov$7cz(V"h!"y 0+1P`GĄiEp@| #9JoUwSZ"mcڿ颴4b6V-DfQ0 )+瓝.6e?[Jş 8ƚa#e|'XKX3aG1X3lOv\ԗi)+r&3 (FkɮKd?GYJş 8ƚa#e|ُR-gg2bfHY=b$D:R6V,LfQ0 )+,6~bZJş 8ƚa#elqvT"G3;B6V,TfQ0 )+g-*f!+b*3 (FkسiW4d?S,dcZOecͰ{6vl\Y0a6RV`&;Tl:n!+b*3 (Fkس+d?[ʵş 8ƚa#el3?r-f2bfHY=H"Q{l\Y0a6RV`%P lb+R.3 (FkskNe?[Jş 8ƚa#e\[𾖱-e2bfHY=8&WCeldKY0a6RV`%M lze+R.3 (Fks.ke?{XJş 8ƚa#e\wُ򣖱-e2bfHY=(!_Ell Y0a6RV`dW.]8Ul yi+B23 (Fk3 [`s'zEll Y0a6Rf`/Te?/=] 6d?r!YzEll Y0a6RV`$Dnl%f+B23 (Fk3e~Xʶş 8ƚa#e<+fi-cg3bfHY=%Y%ltX0a6RV`#@n l(߰n?p#5F yd-2e?k-cg3bfHY=%Yg6Ve,lfQ0 )+6XJş 8ƚa#e3 (Fks5o:^eaG1X3l][lwc 8ƚa#ev =nBz^uaG1X3l}vaX#!փߍUHϋ3 (Fkؑk}fQ0 )+#y; lM֞~7V!=/:0a6RV`j%~Xt 1bfHY(F`ÌV~!fQ0 )+el`WL:^aG1X3l}o#a~ٻ/B0a6RV`ɾps.J0a6RV`ɾp*+g.J0a6RV`ɾݷpJkW.J0a6RV`ɾٷpbٛK.J0a6RV`ɾշpzۨ?.R0a6RV`ɾҷؐ"{!u_p#5F 0vd爛tK1bfHY&2N`Cԍn)fQ0 )+d_ e.k1bfHYl=ez]aG1X3lre+6Pk1bfHY倛V` l ubcͰ;!،*{SU/ 8ƚa#evkE`C1˪N1fQ0 )+d-*Xt1bfHY$nPV*_p#5F uʲWW}.j0a6RV`Ⱦ6JsWccͰ;F$_p#5F sNdoT}.j0a6RV`Ⱦ6${erccͰ;D&; -A3 (Fk!6 }j/ 8ƚa#ev쫸M`CDzWt1bfHY!A`C)1bfHY,!{ͬ_p#5F ޔ[`٫n>=.z0a6RV`G8&wqccͰ;p_p#5F w.*{EpdcͰ;@5E^A:\aG1X3l}"aw:\aG1X3l>^ݺAdo£+2bfHY}ku' #^S+2bfHY}u' މ+2bfHY}t7 cތndcͰhnƕ/ 8ƚa#eoGl~jk2bfHY}{-wmdcͰH7c6.{gn/ 8ƚa#e/l`_p#5F L.ߔM!{^kWecͰ8n4-=&p#5F LviW,3 (FkGv l ueҮYfQ0 )+}A`x2bfHY}+p8IiO.3 (FkGȾ!snӎafQ0 )+}C`iG03 (Fkeq{|i?23 (Fkel{i?23 (Fkؓei)OYaG1X3l,:ۏNk>ffcͰ{;lOHrBefcͰ{kO(P{YaG1X3l(ڗJب,0a6RV`O}5M`Vj:3 (Fkؓw]'^doupyE7 )+8%{فfHY=W_=A:0 )+'8, (كcͰpٗA60{g7Hƚa#el`$7Dg7Hƚa#eo l`tƩԾ<;QA0 )+}QJ`<}ً̡ ҉fH9>??OJڲ ld/G|yvvb6Rv'La5e lɲ/|+{XZP[`p'UA:6 E؟<=ƿ=u'5ŬD` l ʮC`?0+6$IaE)>~7vU{~O"E8(D`'"}p)QG Qx~)QG"Eqx~)QG"E_p)QG 5y~)QG"ENw|;9(<(RH"Aճo}></E8(RH^Uz!?8ÃRH"E8OȋK"E8($:W?qͫ~# |x _p)QG ၽ'oOBȇoၽɏ[? !7?z꣧[> 1C`z/ko~aX9% @ l$ @Ԯ~ wpGOϟ|opuFI OYR]?0Oߺ9?"qi8g[w'ɏ4쓧wga\7 #Oju!M-/(V?7wo I-f2 =$˦^6$| -agQn¹9>!M-Ͼ'aT~23ozDw~u?{˻s4}BGD(hdu_WWo.9>!?MEor 6@Ǯ~ ȿ?򍳻z-4{k^>ʇ} ll=b_ӷao6]H`tmF3O[wػ> i6@~<ʇJ+^V^}wF`[zxcϿ?_[|?X}[|l$ @ l$ @ l$ @ l$ @ l$ @ l$ @ l$ @ l$ @\EЦW IENDB`bayestestR/man/figures/YodaBayes.jpg0000644000175000017500000023520514133140641017336 0ustar nileshnileshJFIF``ExifMM* z(1QQQ``paint.net 4.3.2C   %# , #&')*)-0-(0%()(C   (((((((((((((((((((((((((((((((((((((((((((((((((((! 
}!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?󖐳msgrxMso@GZtz:>FA=:h_P:;vRf$~gpx4ZR#9kԏ\sDF9x*+XnCDGB3򭣰܎Ը,U;ڡlu#⥌Զ<yLrB\G(%I|30$j{uX; i}hKW*M/|yjo`$+?Za&`Þv֚H?An>emݸȦХ<)qB1Lʝ^P"F'^*-lsjlx{U؆FG%T!=pa4E)#'&Nqi"x$4{J!SS zE{*҃<M bH}[t~Rr:sR*;y=jAi`:6u*3z#a :"sN, 4FWN*OGci8RƇq<}@Z]ʈl jZ'Z6Y<#TfaK$<⯡%fiҐ2Ւtj1JBiq3iZ䑌DH׷n1V׎J8 :Tx~&e]289+=9{VahqޤU$ޤUnK a{2j˕ w5-ـ#{gܱ9N=3jzKۜ tX;==i$)y]%.Ʒt;FqU嵜ʞbaϯ\j FVt1yg#ei9qI >Q!y1Sϥ4I*l۞&26ikc$, ~N)Xe&[OR:qTzѱRI&vګHg<Ӛ2p0x4ǡ>wM9gJ0h9e"G.;ũ"3eBY Du֒e-13JфǸ+rO4X18R3Fų&[z{ 'zkRğB)l=:Z&1qTm0;$Oj#T$S׹!E:Rm@hh`# |Q1)! Wq_֔@)1#Ҏszc5k铟Aޡl20qWkRr1M!W\0c Z`P8'Lw31&DpqZ9PODڵF*b/0s[Am$!95+Z$QAQJ *Ln5 ܪv zΣQZƉ cFsҮ+&୞W3X cҡMN P`RME*#W KTp'kc~n~?91??yVO]팱N00y'QM&32<=XSJ&%f\2֗by$NE6Q08?Ʋ0~=F{<0?Z}A #gh\8S$m9:hcSsOlSH ss՟3gl9\g ` S$ $OH0ң"osynOjBrˁ隍q;;ӗ+JF:mQ8=N =rOST$RF3IkgZXJ~+r#KZ%ڤd*̈_28-H8Z[˹ܾ  1Y^ԑztVݨ"ȤHkg׎%09dWŪZ;b;+<|-\{2]6C{ Fw3n ӧn+"<{)`nAw̌ZYqǥGnd(I{=EĊ,o$;V۽yWTpA/-i#eOL-e]K@j6hϒvr`g ̠p;VєB61Tyֵ Aw"[[4Ks'SF=kIZQ ]M֨`OtRhmwį\֣`^(d+co6LŕԮ &1lQ͍1f{ h :oZ5oc-ˢ<'JRSx]dUʫ oag)&x[¤Ca)+N187O,nhuWeʱ74+Myr#{0 y3YaX:ں h|jԇITNO<+0 O dj"&sjVgցzF^=: ޕ^MǾ:W6oCP|U02ZGmwr-J10z vt_zm|mKV_X^^[5ۣ̐P@89%M6 AH?xKvej1[ږc-@퓽=+N={FU k$V@` ĖP=@9th9@8O+fȢ]PjgY"8eZ[ ṻōnj7Hh,b@@6, xfh5ctW$IpWhl1$jTt8_:$zޓiiyB s*9> YOb &&|X9K6QkhkE qsqnp8Y kçڴfI3sp|X~]?h+tV]s~J˞734\V"x7KMbM?U$C.n\q ?/x6:5q+ZYBJQqbup|<-'bu2|́}ȃrG9Ma|=x$+K]s}tHP9v82FFKSK[wVxe{wJ=A㸭—۫KOitsq5YJwFP N15WB [yw'̸8?Q^w|:KdN\ȹ&]i^ s\f9,BC`u;xOP־0o,'deI`0~2:B4 k:_XGi6`pe@V %Hm[j~ -5eޓcD [͒ylr@9>G1]֬(J^ ܡd9frۏX8*'ɿHwFp*&o(8C?JdwL u5.MTDSrcNzH/"@ISzM 2+Ԋl?OZƻCKdp gޕ4m_U!wZ R?zi5P`x(rc-)ڧpu$I]>(tr$=RC4X흞<6*<69=WLd+!yeПk&KeIRddv?2:x;7ɢv r` s)4$i Owp* %֧%=s?<[޶h{WIxri6\*D1{mw\2/S;t犄 .3E>uJPzҚ3%KRGr03Ti$x9}h$D!O$qZtBztZlҥ<zBIccdqcXKZlч%g{嬬34xgZoZ8.;FΌPbd*U1PfP]z[+&Vhڒ9͌H'#fu2I'two@uMRpnsk*]FZDtlꑮė' O{WxkR߶ߪݏ՚$U|3y ڂѨa&^X p95YOZOmˈ,wHm8}/ƝAq᫿Bj grAedQ8t2|t^_F7^躥a.jdY;9@u)58I..G$/Ǧ5~<#htCZ#& 'cm $pؑ@b0A#М<'t+_W^'].=j HzBG(HQ: >ZIIҼgfX H ߝckYl$ 8q½>!v^Ŀ k6:6RDm\ ʜ8LO&wrnU1$P\g# c?uuWg!W7 N2|/J#HU >) տ u~Ȇ 4)K,3J/$ʜu(q߲=J+o' 2#lgtK2F;]}_ݾ h=SH~~\S w"aF+<57 a˪j2B-i9U-8K WkoRn8feHo(0K7Bv%xt*U[ ~h6I^>$\v&TcmImB.oHi֥بNj79MBdNjH^uQ*%l2/jK$ȩS}8ƋP/h e'ǚϐcڅXFb]q#4sYV$ѥvϒeqqV-sT|?j6jmF2cR'951UּIoI<oYn,a%s#Zφ[O6ZkWv/D1]q8=+XϘ|Ex| LַP` c J ӧſ y-jzD"m`Dq1'}uDsׂ|M_G"xwTApeŭl/&ޠp@~9uKxPYho p`\Iؓ(aK^3Lo –a&͞zc/&jvl+ mB.rel@n>c׋tHԯ!g98ԡg"Ɩ^]8&'#~K0q*&|=y |eHݻ{I k&Xjw]gi wg[-ͨ;V|QgL7ET8-A*w.Ocgj~*kM="Y!! 
Yv`sW~VPfk{.{ V9d Hm>?3뺮srS5-Y9yQOAPNRehWKrn2F;CTcW'43]^޼r]_\u;F{@M7 &%Y!04(C p ]JϚ p~)ɧy-CQa5ޡ8u e9]X|GoRP 6A7V7[H`^+(To%԰yd #w;Mnڙk8>٫j6gqfb[4[xc\} Xln=rB629k[6 8R]٣-"䌜) =$8;^YKmQ\tzݞ2M{p헚Y>a\VؓnRPܐǠ)ltS 8#p[IҼ]a}{ !fǯA];rr=rAZk.avfՊEIXڪ8qqUW 1Io%lrFoPG^(L):m)l~M ,K}ipϥr8?(8Oj5R{AL2Q(n1 ^1bJ=082XܠTX5~͌*?&n~\= 9>Ey`|vzJH$CNY =p+U3rRAw;q']$wO:F9'>rC*S90?Mm%hwHp7z}oiIn±fm-튼'Kz4B[iy$TXkɧm%ae#?Zud2 ;z҅!eZk"aë[Qxn##Ǧ)N?[AU1 Hq32j, s ]Jܢ37XH9wHgk]`Jm!c]x\mR9r=)ΒUFA<ℹ$'s{hXjM";!S$Oƾ'qj%Q{I.kH̡d!ztGZci8L6y*=ji 3#ysۚoQSMt I#ߎ1S=b'oNaVXZȭxRc߂_dj""` brՒ,Cs"z?hBlvnG .V u8e&`/v_zѪ<1@^:֒\LqIQ@5}0 5Ri3+uƞ3җ(ԮctcrʗM;tk;+gGfQ*Wb^WOYA{w=+5,[yh]\,OV' Mlp4u 2Dpӧj̰9Tp'-oesFHճ F8Zq M Oz'uCDS rXdA`jLE@Q^IcpI1mPKOqe Ng։=)rvXWI8 ` F֐x`CKM ڧ$W39l)'ڢ R64k^^+ɨHG1yw֐ZKGkBz1Ġ@<:2%$x}I bvYfW#tf'%N:Sgwci^8$&I^}H7H&x")$f#I"'h UMt K)KUӴ{HgHZdzӓR|VXt _iP˓;3@~:jxCzMŵk6E R˂Nzw9z55\Y NrΥ0P[.`39G"I8f8JXJJ [{˳!i{H&`~ jk7싢I\Ik=muG`ObFFG#43xfLd(2<zWW?t?~Su}A7\#eYY8$o>WOU*4zNR =1ɯq `;n#]ۜHGo8)doY5РI)aĜew3f9ٷi|J4?<[_|w+y7/|5&{tֺVyEޡ{P(PXxZκ2A:ưVEĜ9=J|.Fwg46>rsր[Ľ7Q{x*e'٤v YrQg=r$c ,s8mFMS'$g=QbҮ`Vޗ-nSK(u5ԆvZmk{)]xx)APO_sZZorNkkǷ*C.##J*@֡R,M()/Ns}r,b鑟JRwV)m,^rqZvD@%s$)bܑZ{ f!B!3đ) J^/vqZʂz>+Y/.dx%^O:ƜU7?;º1Q$P>'\5ΝnUaIcBgpJHDz9A$DTYa+Kzpk>^fYV卥1ɑr9yW\< i׀>Odg=g\fX% O_^zzcݬ"6 XlDoK D*SZj7{T4;hom[fR1 Xp |I+#l^}i8Ґ8={_=AڬF 1Cab|Ӛ> GzA?΅QZ!%鞣 wnYێ0p*ݸ=J8c&llšxhD߭S2͖?J#Dk[h?*PŌVWbVuI$dv56{Y;$ڃU=fh /dcnuzx܃ihYR?:u;ĺVio̚uϞ Ce>zFMR玓J{h` q|͙'yֲn{f$KZ(-p 8t&; ]::╴`W* ࣩq\Ǐ5]{eŲ4p..w]8{t5H"7m&֧o=VHlAf`w.;Se3[ɟi%!Ǘvϵ>_ nR\ϲB{p3}ꢮ;DKWf7rUA65s_ui7b#IeFJ#'r `?>*j"ܞ]# x,w\UPN3\  yt <7VW]?ڮJ 626G'>-~3N|?L]Prv*d@\c8MƊ$k]ODriT#1Gc&/4y?%DV6v9  G'$i<}?Z&1mCd$Q̬q@^Ok|ys LbO Ok^ _S찬KCp!dA"32h~]O~{PX\OpL -[> t.uV/c#%dKk3-UUK G^MMs/^25svY6[YZO ܜ;c :S%K ]NlzHo dj"[yP.QYc )m!6Mpsgq#$z_j=E.)YBb{:XڌR[H#m2Tq5#AY.N@w?k}*rs(2Z0f'n*Y7Q*1(5ט"FfUpTG`ϊZbkm<OuxoCq{fxÈˉ k|!8VٚB1OT'?jj'E^o.Iln,F>qzYuI[p:Gh"jٛi$9ːp2yϨ{Y'Co;GZOC2{ L R)T`>5CUI ꧨa}?D2z?θy'Gpǜ>=]+!`:>"@8v #<40[p}+9r!רJ9d$y:UgOOJ6,$=H W#Qcg95UjІ8,rjT?vO]?ZXtj6#ln.1@ G8t20P:szX:;]BA yv9UkZڤ+rzYB:އ->0z]5R`؏\N+i&;eR,HDÜ/j);E,@8ba$"QCi2(rFΠA$*Dm8A 1?Z.#ƮB7`OZZ%eOBs[mrGFG* kS$[gU^$ lsER3MMARK7V/#kn{Sđwg0]0F8xj$n5%ш+[BS&pW~6o[I;)㌜:㥿yg ۰E]`#s튿x}TH`GٮwhI_LDc57PY[G}Ov3lކE&Mb)szA86Hc1q#-qJ%kxm2tެ <}+FNH(b{j!ԤE TGFk?R, 9 &AQ|ƓVW2|CbGGz~mm.7I(k[5+ƹ4;:6$P:uZ>m'*H*8Ӟ)WnNVGIY\薊Jx㎽;Wg,AHEo( Qs~*hb^ Is$ۃyǭk,ϧ_ݔp9OJճ'RSc9vVSq)FH4<T {5v27.ܫ|\fHI`~T3`Nr;T =y4 .*?:NATqLlnqRi$^ⶏ ݼ3֗gdI;mhx[Ytjve8PU9?<7]=mP>R$#fb:]Jj_ oWL&`Tۙ1`kf ko]^SEe6phM&H% 0rFwcb<njfkk&,J !y TyD/xݗ?i}g3~|An³&$lϱԭ즹/nb%BpQ`p1ב3c;mV_ew) Hlr>\E&-ۭ*Xe pA92ReoLJ^+5ы(0|:T55)זx-N$ly[\9ҚX"|=#\X+[y SsҺsZvnW|;ay-wHWs!31Esjm<7mx~"ݶ2,Į$r9jIz|#_dVztG;H?TҿfkZ\Y]&t ㎄2 P%drV. i:>Ut7dc_׉(KM}: "܉RxA<΄$3X_QEXvd{̹빽Owz}$x0Wz:$ΗLdH-|ܷZԯ++9WV'V\m{֖_煁tӮ8fn&sҳeb04cpAK|*g'WClV+HFaN[ GiZJگd.֬ImG; '5rLQgk6B]8XOJf]C?q"\ןXck"=X:ہ[ʵbQ]K^\%O(NKBk<=B!q6MH/o!r0B+u\hZвcp A򻗹Z궗*C4цN;Ɖ6s$Np1g] \Gx!6u^ֹI:c+ `8,Nx!?J2F=SOLqץ!G֛`Aޝ{ӾO#<yU\J fP$.s׷U*"-4=9{D= 4&!$4dF~lJLdjFϷJ6gH돧J?Aw$wuߊ wM*G{(:ȳ;,rAyᏏ6-ei-L03,c!2@`A?hIΨ$^Mxg Gt0fD m|88{cXCO+Hʋ3A:50c?9cs12?^!"xu1u+7 ll <nC-q?Fns|~/N!]ܶ[5زgi%nH*O֬lد0ot*{ 8u4D+Z\V ϋ$ }9dyMJnA"I:.\8kkV,z9+S.-lU*G 5=C÷laS, l!sɉ^Դ>lF<(N31КƧy^LzKYg(>xSH> xRR>kXV;%Et%rsZujj[ixS@Ay'.b, ML<%͎:Io|3/vWrZ}X Hc+n L=%fN][Ҧ.] 
[binary data omitted: truncated tail of the preceding archive member]
bayestestR/man/figures/bayesianMaster.jpg [binary JPEG image data omitted]
bayestestR/man/figures/unnamed-chunk-13-1.png [binary PNG image data omitted]
bayestestR/man/figures/banner.png [binary PNG image data omitted]
bayestestR/man/figures/unnamed-chunk-9-1.png [binary PNG image data omitted; member truncated at end of section]
g?w&,acq;61yv;:^زه'6_ފ ?'uI_VE` NKvXTI`lM{bN;VpbrWw?9"Eʹv ˳3vH9ם gg vr3;6 (\W;@`ٙ]J`Dl ɳ3vx9ב gg vs `v.W,·s>JĜF`$l CۻfI-'Q=nc Za9,'f;L a@st"84/$R#npbQ%ʬzݸvvZ앙4|Gx̬s lnsB67FAv`Vp-|ܚB`6T,g?ͯ"?4n !h3vknP6;S`w؁[a%ٙAc\?*6\@);^sv@`@Li%љ?}?L?x#WCnN\.6Lnhgխ_?/&X]&>; b:s') .\06LmPg8~ޓ1C`6TlHgwCa .S\26Ll@gӫ~Ɵ}g]MG`w$؁I[at5_|ǫTkb41wlXg`C^2M$Iw)>vb86L3_V[Gנ>S; b+'ק+2U S v"]kT@`w mc86Lg0-Z.& q>&ll`w#雗 [.$l S$."˔- Й7Ë^(;MrD`+lȐ|Eav}l}-LrH`+lFԙO(~,Jׇ\ NӶ\7P37pVHD`6;pV0c{Slfe& l SجLrv@`@ņ]:/tNN;pV06;|" l k3SlX)Mrvr`tmvw'*fgء \``+lY'%Jnlm饗^`%Jn\26lPg>s43s)=;pV0!8{)]; b:S\\v e[at0? P\~O_=w/_z0Ǜs!l c71/_/z|~=*m; bb7oR{ի{.Urvҁ`D+UnQ͋Dx`+lOog.`,q׵ػ.r[v@`@ņ:gW^^*v;˅|`+lQqւ] M; r\`o.^.mB{%Cnlذ{u͋Kl #k7ZoڮLh؁V0~}\6n |cGT/c[`eCQw2^o~T 5H l cЙ˨eJVdťCnM`6Tl@gNa߹_\/qt]؁V0!::qZKwܚ gW+CUz[`|ǭ ̧y7JW` = l gؙ'f^y +'v 16|w4"wdt܂lX)wdt܂d3ٙv) lX)wt[; fg 9'C,GtO3wϳ|t{[R Pr`+l8|[p|joѷn`w8Pa?ٔؑVpA}$ޖq; N737~9ޖVq; b:^6;m:{[Z @~`+l8ـ|}kxcML`oK8( 63W ?!B`oKk8P|<ݗ\~0gޖp;PB`+l8Qg^ޖp;PD`+l8Mog>J )Jo$"lXog.!ҵ:A؁2[aI6Tl D$ lKD*ӹy&*6"/o"1?6`};BUH`VpOr|겻+]!"՛l؀Q^ǖS^ @9hC:͗^ޖo;PP`+l8VԙO(~,Jׇ\ y&%#E _oE"{K^ ; $ #I,7(+6E`n; b.r s: M&cٙ{Sb @imvޔm; bmvޔm;P\`+l؛M` l ٙvמ'Flݥ{}؛M` l C }`~ &%ulސl; b:Wz l;Pd`+lfHgvuͅ-7d ҙz~oYOOdl؀|sUq?^{0皖ސl;Ph`+l3W DyռHD`o5(56 ߙ}2>ɇސk; bT^wA`o5(66UH?2kM` l z;9= &*6,?#~P}+5vVG`.vV%"z&*"Ƕyj @с|=x(zz R 5"">+]`J-5(;6ߙE{_j;Px`+l8d@g.xoPD/v@`@t2;w>zouJ'N؁[aC:szG^zO^}uطR3M`l Ay[؛k}+v@`@ņuzEu7r+M`l ;'촟kZFn  ̷f?i;PA`+lٙ?{4F؁[a@`@g?Fwx^oz6||/[L`M4"6tm? |{?w?;߸ ߌ77r M`6T3\~qx69>5{[:R`-4#6ts\s 6|rok{ &.O'?S"/V_[_bJ{_޳>؁Z[a@a\tZzjHw6?Z@ZגL`6Tl3V'/OSBO6SZr @5`_g>Z4/Q:L^о $Zr @=`ϰ]pvAv7zF+9v@`@to/zc?[ikD7\n){$ǙT v ̟(g>wwU IDAT]!r`ȯf;PS`+lߙݻ L}{O3$T  OGu}ގ嵋3o-tpD`J.3P{,tx'.% Bw\i]b+}+9vVim؟N|S. +3WrKzRe;P[`+lPxg˾o6"ײL` l +p7L` l 7 -7L`* l ̓.r| Ξg7lXog^z?S˾~&{-vVv3;^D[w,(d;Pe`+lX9ؙ͇ [vòL`f v e lnXv [a,ݰءYUa%Cl @3 K_\ӎE`/e lW m]_o L[a&ݮzK` & ve[;4VH`+;!  3w#]zK` ٙ{!ء=mvf)6i3Bvw%C l @kL]o ٙؑy1mvf!;v`fhK)`v6;S`_ pImv#eL}%G }i3>F.W{_x6;S`_ dRL]F_ ,[aЊ6;S` 1#e2L- h3>V.l7o<;v`hB)rpmvcEٙ[`A` w%ٙZ؁`L]F_ Ȝ[a0{mvc'L- Oٙ[`A`(j3>U&fg l=}?L{ dR)L{  #fg l=}i3vv] @6;Z؁[a0[mvn5h"6sfg Z؁6[a0Smv.5h$6fgײjM`6Ta&`LUk;L`+lY&`;; @C6;S`B`6T؅؁[a0;mv.4 i3v!vVL) !`^L]P6;S`B`Z l ٙzϳs4 9i3[옺!`FL]h06fg B@6;^.;d`+l؅؁6[a0mv.lX) !`L]h56fg B@6;n[;n`+lf؅؁[aP6;S`B`Zl @L]P6;S`B`l @L]h;6k3 g-h<6uk3v!vVT؅؁[aP6;S`B`bmvf]Nv@`+l*fg BT؅^mv.'j3v!v@`/e8M) !{%{IL]kكNfg B=Zmv.k٣Nfg6ŴI`k3v!v@`0p6;S`B`j3 Şg؛G ؅%{qL]۲ ؅-{QvE#=jmv.]fg١E`õٙ; e؅!{Pmv..cj3v!v@`w;0P) !SaL]ݲG fg Bd؅?0@p`g6ءv{مA`v,{ @6;S`B`كٙ; EУ؅}P0vEcYfg B=6;S`B`vٙ; {e$ٙvv+{(@؅/{,@؅=@`H) !{6;S`B` ٙ; OЩ؅=P.mv.졲Gth3 Şg'؃e)fg6o 1 d*fg B>B]mv.cd+fg؅}qL]X؅}L]Z؅}V;>^Mv.d/dg R>E&;S`B`I4ٙ; O=`إ}! Mv.Se1myŞg''d .Q0ݒvؙ; ϑ=h^r`F`YGk3v1v@`'{и;S`C`mk3v1v@`+{д;S`C`ٲG-k3[ | 5ؙW; G=hWy%!{ #f5ؙW; G=hUy%!{cF5ؙW; G=hSy%!{,٣&5ؙW-bϳgd;Z`g^5c5x4\؅=@{]cz4\h7 #{Tcִי ; Ǖ=hL{ !{d٣י ; ǖ=hJ{ !{tי -vvt=@C%]\؅=1@3%]4\؅=a@%]S4\؅=@ ̥v{}E`tG" h3 u\1 *eE毵\؅=֙+; '=:s.ieGf\؅=֙+; = :sE`B`G$sZgB1 |֙+vvv@`_B`5]=,:sM`B`edKf\؅}!yj3v!v@`_J`5]=4:sM`B`dMf\k7{3;v@`_P`~̵%Jc-T|6??ط"A`I`7%{$P;sq:z'ް{L-Cjߙ\X}u_ Ur2ef*Q|g>Y'+WE|h!I] @Je/߮ Y =-;I] @J߿}zqB77"e!; ˕=@p{Vv$`Cޙw{y_ZV^ˮv@`,{LP;sw5"] M1rej Wxg~rx?_{Cv$pޙO,H"ǫ[K إ|؇r&7dA; =(Yp^v@`W {hP;S`+;B] @ ̎Tgi' {"v@`!{xP;S`k'v7e*3vz@`I`#Pxg sԀFaСt㹲05nv@`W%{P;qw`{PC`v]9)3G7ãdtRxg{k=Xvv]@I ՋsɽO`Ȯ; =(H᝹Ie!BكrޙW9.OiDD`n; k=(F靹zBo =v@`){P;sF>"}+D#R#2ߙS֟7|n l}#;7Uw_ջo,֋طkjeyJP~g.Wlx Ȯ: k{GŞ'DGD`v=3*~tg/$=HWCg۾~mGg/$=VGgƫ~u{c>=vav@`.{# !~٣DMt]3=Dg؅= Y=; !{ IG`B`LdOh34qO`lddh3 Byk3 Bk3 B k3 Bj3 BKj3؋=xZv@`P|Йc3@gv؅=Os@gv؅=Sٓ@gvh7 #{. !{. !{βg&@gv؅=kSSk3;41˞;.˞;.˞;.dLgE`B`v2" ! 
ٓgvi!aحȞ';.vdL`٩^e  ɞ*;.dOmI`B`v[g F6$ !5}gvj7 #9c}gvj sa ʞ3;.eOfM`B`v F2& !Q{gv؅ݬ1̽3 BnWޙv!v@`,{lsnDH`v۲4 Bn] Yfޙ]d!a؅;3n`/C`6 'ygv!v@`=pwf@`B`6kS ygfZ`I`sٳ ǛwgF Vtݙ]lʞP8Ҽ;3" !mS GwgFv!v@`#{N̈.ld* 7Ό=K`0˞Wl֝؅Mfݙ!]t˞[d֝؅M${v`YwfH`B`6^̐.lɞa1 < Iܙv{k~; =pȜ;3& !^ 9wfL`B`6d4Dܙ1] =mΝw`g ag:͸3؅P fܙBf=3v!v@`s)]3v!v@`sY-3fGcXۙBxS7ۙBٓkCv!v@`sv!; 9UQv@`sIvA; 9K4кvAb/sD`6gʞ6h\;v0; KФva; OРva; SМva; WИva v@`3 )3; _Аvf]L"{hL;.l&=EaC`B`6ɞZ0! ! eOS71>pK`6ʞnG`B`6˞m٧^C frٳͳ3B+gg؅EdXs53? 1 l=i,;W]\L0G^; `~fٙv!v@`sYs̲3{ Bⲧ/9egi`g'3|̱3 B&G$0s~; ɒ=;.ledcg؅M vs~ c v@`,{: ;s]ؤ˞j6@`B`6Ȟ5^G#ٓ@fؙB@fؙ2,6MȞ3B`B`6Eɞj3.lJ=Te~9.lʓ=Td~9.l=b~9;{*{:s]ؔ+{:s]ؔ,{(:sbIDAT]ؔ-{(:s]ؔ/{(:sv4; BPu0; EPu0; HPu0; KPu0 졇ŞOy=l= df9.l*= cf9.l=bf9.lj=af9;z*,{BH7J`B`6˞rͫ3؅MED̡v!v@`3 3#@yuPs v@`3#@Yu`; fՙ Bff'I˚Ug& !ɞ'.hV9;y:R\ p)sv!v@`3Sٳ%e̩3؅|eO0N`B`6s=eLnN9ܼbϧ:h@`6=kLlFy]4 {Ό:; iD 0uv!v@`Ӑ)`|3#*McQqͧ3! !dO#OgC`B`6MʞM2<.lZ=b>y9v,; iY ptQv!v@`Ӹ<̣B ̣B46PtQf9sB ,ҙ؅ ;Zc̥3# !:dϷCͥ33>){' ݲ\AfҙG؅ iL:H; L:H; W Igi6};l${6<.l,{7<.l8Nd ey,]p <:Xs v@`igeYtv!v@`fYtv!v@`Ùgṃ BdO@fљGI`g'F=OmCgO`B`6({3<.lY| eyyvj; a6Јt Ş}:lR :+{L*=sVgB`B`6\D<Uy ]p1s90Gw)ǤQʞс3O" !./{Vf< ;H4ؐ#{jf<.lȔ=ի3O# !eO@jBdO@* BdO@}*Uهd@]S B2eO@%S Beݙ=. >媺3O& !}@JTug>H+ >0E3O' !}x QsgN`B`6, 3OWw`3 1 l }Uܙgh7 #f#d3Pu`g,$f&\Vy]0OBsB9>3Qs`gF%}8SmgE`B`64#LoYC`gOG!Vypk}ů?uvs; ReW+3,Jԫ5 _>w ;{fPxg^[k=@>y-bv@`T';(Iѝ߿}"zq۲Ξ!.H`6LeGQpg.=ܹXqy۲%i1¹̕e%o]8X1|˫;{@_leg 2;x- qzy; Dv봢ܰ!}WevK!"eR-mO;H[^ؗ=lf/;TTg{ѵ÷*+/]c&;)3=ܻPqbcZ6#(X  TKmj{YBEyl^d 0 {`"FTx`?A3öп|Ζ0ܻw[te-`:Yř>vKNၽŌn )<]\5-`2Bź÷ɔ˕wY^1|KL遽:~%L^wz*p]-`*jdž,kxV-2jk/';}`K Vk-v%\F XSꢙ_]ްpu6TB`6H`7uN_,7}y8pE7^-8%+ol<'Wwh_rn,[o E{ӛ彞6lZ<-!6yv#<= lY(rTXiV_3 <eZPfNO--?FZq (#o 'yyÊʨ݂3rnP7]-_ſ[ ;.4/ ~ z,5empi6\+뻧߻ExVG`@`3$ neYݞ?TB`s`s KQw6_9[?S0KDȵ{W_(ܪ>%8fd)#/r,{#}+"[ʶZ]©j%n)s5 fLG`L#Z'  @`L+ ;w>VY߾sg^qǏ?7@6 ; CkD͑uB#:~}%V 2~S#jue6Q`u @oPUf3 ;ţ 6P 0ϏX#2f`_j|}D`5W?ע^֙n:w(8Fd7Q}LI`mYs[5wf7|onkkg˛ 5kN]n9w}v];&}T) l-wj<ڏT\7Aw懷?GWvvo|kDvn}p׎`JlM~﹭{6~}ٯ-w~?ӟ`w>̟Y_yW7fg> 8To9t<.}̷{x׎`Jl벼^*JCnh;ΟNVA{{jMnEsOwN޵ݳ_g{}NxtثmVgf"VoݟlqmNn؇7O~_ٵݳ_g:%|ӄ݁62{̛?FJZ~#E7qc5[ݳG=.}>xv~LH`m7O^Y|{y~ 2ٻpov4׶gϹo]}0! p3+kfv񅇂|/7?ڍԝ 잍|% o&$ζ(-OVwxwl-1Ymz'Z|G{67,W?gvvmp`6v9 /wݛ-o?d+]ݷq97Wپw׆v~LH`~c;"~3<-mlpƃ{q"ܵݻ_g;6џ}o-֏ʮp=f{q]ˢj 6 t [M c/+ֈ=&l;QyOy N߾/5^`xX`oZH"vn`ߜo/nAGnw]x`ぁFd=vE$g;7W^|vs]>r>>}R{M{vm/>Sz `BlgWno_s]o<0oo[o/ lj>z{P`/?bGlGxh`<.}q}u[{honٷ_g#Cs*vN&b{6קwlu߮ܲo&$64>^U3[^oS7kDcgt߮m#7fp`&$6R|;wz^c:K`U ?xtgoG߮m[/ l{ey?͝p=r~;|g^͉_ xp`ʝ=[zh&$vn`w?0#?؝=Gׯ?xx`yGnUݳk[}7x36~n)y?oscsqtwU8}'y?[uFwOs˷[^~w#{}r] oۮ.e C0 #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #0" #12ak5IENDB`bayestestR/man/figures/profsanders.jpg0000644000175000017500000006174414133140641020011 0ustar nileshnileshJFIF``fExifMM*>F(1N``paint.net 4.3.2C   %# , #&')*)-0-(0%()(C   (((((((((((((((((((((((((((((((((((((((((((((((((((}! }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?/rb Q@bP "P61K@ 1@(bP1@(bPhͅL}zIz)oLUo*43.Z'ɂĕdh % OjTQ7b Q1F(-&(Rb Q1F(b1F(bPF(E+bX.) 
1B&(@l(LR.(1F(qI1F(b Q1F(TMs}ҋ]9Qvu+*T4KSHǛˋm 10Y~V͜.gFYӜ-aB:"+0Ü#2Fֶ-]gT7# V~KG!82Z-#MfVզ4rD4;`gzm^ź)(1{UYsʹ+`du$uƷcd5x28ܬ;JJ."b Q1F(b Rbb Q1NQ֚b( ( hbCb (@LP bb 1@(b Q1F(g} |;/ ^I#l>= %˛qzXkOzԎWQZc< *ǎҩ3ʨ>*jO SgX]\HN'XhI;(n&rV2 &3):!ecucw{t]ٶqDǘ˼?ֻ[+_,Q1 ^s®֑þѰruZ0@*C)Њ(1(bP1@(bP1@b((@ EQ11@(P(1F(b F(F( QbA1@(bP1@(DIfu~J*UqGlVvlƌ႑kV(Ms+Qv'vʎRV N w6TV?}3Nq,xb-=q޹4GJ)Y#bT0ҳnf[U}+砗6x+Q B|3U!u6PA&|NEh饥]̈vֺlܶ<6Z=nN@bSbP1@.(.b QE%- 1HE@b Q1F(@.(1F(&(b Qnդ?(PzV/}0F6=UhICJPO=c'b{4]_J6)F6[XpmWФ ;\!E7"Ed$z眵QJZ|Bݬ!5{o0pNu-HjInҍ%ɷQ`݇etR A:#v{ڐ458H~=&{:}-I,R||gWrvKAФgwO!ӕle/; 8+ '` \n.ry8Lm#h8ppבKl<zm7PJG{>"ڵdo]11^~dLQ1F(&(.(&(.(1KLQ1F(((&(bP(!h bbPF) B1F)Xb1F)h1F(` Q,1E(; QVFnA<}+oSC=b4 Xލsf$]{{H(mO|Kaz,fhmA=7Ŗ0_ S[JpRR˜>YNC}>1NjI<(\ )y~iG~hʻO\c.cN42 h݌mⴶAT+I%d|nN1F)A1@(bQ` Q@(XbQ(Q@bPh@XE Q@Ť(l(Cץ.(P1F(b Q1F(b 1LŇ6r"?̣m?xqml]يU]ڵɣCF.j?-x|~uj^)6i]@an iE_vyC:Qau`G&-M Hp̣nNsQʣU*IzLm_if fE]Ox[^Ӽ7)#Fڮ2RZ<%ZMI}µ/,8~^}x<6>ca$ө᜚i34BeDԵH6ǹVV]&gwhlQϜ Q1F(P1F(b QBP1F(0 RЀ)1E1F(Pbn(PK@(&)q@G4Pץ;bP1@ \PJ)q@(bPbbP|^[^q8 ּMK u]+ŧGR~"Ӽ%}>;8ز8BSV~ᳺQ좎j`6Uj՗-Ywg7/ J0A!#OZ |>m+ijv*R^+CWK?kR\=WSiZXX@: O'$MS12J7^u-]lxd7^0>+jĹqF+>x1F(&(qF(.(bb RPLRЀ1F(&(.(RE\RbXbQ` sF(A.(@bPb( Q1F(`1F)bPU͜" v?y3%[ΤH8?z2=<r-> _.Q$b {~5M:OöHĥFkZoG/fENѰa""0Oផjo7\*:WRVMLZSr[7=I- @%2qKSRE|l {3RU\kIuhbXYsן6ˇyRnZЃZ2g-68;:s] =jө"-ހ~<~׆\ǴXOCz%wz7UٯkomK(B6J5)kfxM+\1F* Q(bP1BbP1Cb PE bE0 Q@b Q(@P() )Rъa1F(Q@ Z( ( LR`.+"BW#m ?\ޝ}+oJԠݤۀ=zsV/3`m'^|R:xNȻ.eD#Th۹ǵ6e ˆ%a=르d9GP5 iYϨ=93YέEq|͞*/Dq t~ g`$Jcn4Te _jB;hivOlol[ (*z8H5#jJ 5/J$3gjVI?x4C랟^ⴔp}W֬RNIvTօuhћ@&(`.((b)Rb R1H.(@40 Q@0b)h@&(L&)`ъ)qC(C0S@1F(1F(bP4A(bA(} IcG ݈Tʧ7NJKZ\9*z:yOq[6Zw ѯ<׋QmhY յkjP }>N sάds~,سXI#ו\L;NsKCI{;Rf0j1M|QM\o” -VghZ-㼎~ܓS\Prxk׊QVGNrrg$yctQVSJrz+춒Z4Sޗw%2=z^?hEaoJ=gFGF ?9V P0Q@0AF( ( R⁉1@ LPJZLQ` Q@ ILZ 1@ ME:@Q; a1LA1@ K1F(i1@ I1F(-&)h-&(#:j"o^c3^ S.'`FpQꦸ1TSGONWQMSE,9ۏ,.VߐxE%^OI3'9 kn("S~ֹ0m< (}Nny =ش:MK{(R(#T̞޺QPW{["Iu9K`W2e_O5 3A[$IqQ`n62!iXAMJ)N*D uq֚ZX|DȒѰam'RbBb Q1K@bP@ C@ @ (%.(sF( Q1F( 4c( 1EqELR`) ((EQE1EPF(< QQo'w |3{ckvN*" ++dyM% z~(\jr~e]éQK񥦯[^鲅Yd^0:v5Kch/iũug,;y{Zh5y?A+u'w=1GMol* cWсE+;[re[x4\ 㿸a(j:&bck$&`֤(` )QB (`b B((ŠPNaM.PLSb ) 1LrSb((((PPKu\3 tri6 W~=?VML@RXmm s^}‹q*dU?2;c'!|yCtr"P}D>5E[5RL@('[sVWl6y`yV 8 qSV*)0xlwỉ+ v,QKu;%KuT ?,ҧMSb+9mGQ2W7r4_WBk_b:OL`eGT գTU(\eQթ/`)6 er?Ǎ<.)@ Me`aԘ\8m9Oan\E]z?JҜL֍DS*r >DEPI@-R`bP@&(R⁆(P  (Q^PEPEPEPE 詔Uievھ"B3ŷ-Y^EyO&8 nB LTp)֢Uooݽ"yž\I-t@kgR䭌gFR iu"P D)@IÊDzԁxlү&̿Gr&{s"H9C4R߲jH[  =Aק޴ý@Q@Q@Q@Q@Q@Q@ E- R`Z(i(h((('@ٿv[J{H«]4IS|b**KU4S\wMטzGm#lOLkH Gŧ ':VuQÍlr9$RZ^ݞ]'%_*5=v߁ [7&b}ǞHEk˨D MLI# ;z1υPj<Ȅrj@8u 1lj";1ƷJrTpG1{#OqrEibS)=a +9Fw$Q(=l)msN ((((PEPEPEPEPEPEPEPEɿ6;{XTsV`bgP뀣@m0[w #!H^RgW߶.Y葷cAWs9#^*_ִ_Ngs .o1$U\cH'&)=,}omD܉B1VY!E E+>he{DrשerGGҲfHAQ3@"0*AISm3I*-z67@ۆk2k4;IW1G0DzsW>o౭|U'uXw@>ߔɏl`sn )*K-|U p(J9+%331QQ*$Qn*5z͹EWv($-a ?M^wW1aE0 ( ( (4f (Q@ EPEPEPEPEPEP/sbĺH힃|qSNd^\w$#F?RJ~*- yraiv~ ?* (^3'V5|"2s>E{gj-f]76zqo־?_ x>#Wts~t=#ޢQOT4|V|9 D`G8s+d\ˬx2F`cǘI>$% *-6HQH`uc (*Ԕ1 ڱhsQ7l76 Db DԎ06A-@šǜP1K@! sLe&47"$+چ:8k5Iס(ZHvU +lbjsypރG%rK18'Y s1 ( (ƣ]#Ğ&WzkE@nU;@$듞Cq|A~"=r#VsD>y9x=z 沨N'# *9⁢$ 4g2w5[Kr<=Oj$ΟV+FAY'4GCRʱ^A&"b9h<1 ˜Y[Q~2(I ( ( ( ( ( ( ( ( (|'|@]oYԴ. 
J8|h5;7W~E+Iۦȣ^:^w:OmKx{м=hWYNͻjv7@ѵ-sNҵMFږ&Mqmfc*X =O~ڟ BY'N:bׁOľFc}6k Ym6h1̑,?OSY:w|Ij6Zv]Ͷ z@ZwwVI텣V7s>$G-^h>'n@8ɵ'H'$b9aoXío]֢j&F fH9": +ǖW_vK=#WAߪ񥜞%xvI}G^dԮ4b$iIC~8@=ElxK\׉j̑=NnB2u {WmqyXj*-.4[` g4XpiM6:UGS㖓 ?y('&%0"M 4`R"0$ucc`'۔]mEL +bB(((Q@Q@ I@R ( ( (0|{xw|GzۙAXWuHԼL\ZwAp*eأ"5fn=;Pm?ڣX[]$\sż6kd9CG 5x;ƾ ou׶O#Hkf@psВ}O |u~!-,|#VCؽ,^F|r>UO9O]Qk艪GM%Eamc@|{V: kךZMFqLW_·~+>\\I"/EEfsT~w|3i*xFLy"rF26̣i-o;/V[CA "d Y_wqmZ&FO6&lTd*&e )T]B(k=B9']R y2^hb `/ğ&]Rvl_{g>jya↾'5n3_1ˆ,NK ((F 6v.ϊF\`y_oARBR'ZcJ@FMBjYH1I,$ǥW>sE+6+t=DՓA,04ч)US&94 ^s TV 5,dةܡFkj=Q ((ZJ((()h(((+㷅 -'ZqaS~@}EPEPZ>ks:6{LsGizK& xث"e# u|/>]t_(Oɑ߳;5<;U|#gIu :eؐڝ6QTef'c?T $;яۉ~BpPOxFl]Ϋ+/Tr(c_P$qE5 ʳfv 444wHب()$yH,3RI=^io7\z*RrH\Q繪Oe5~r⶷A5ͼKAM%wzBт|'|Y9i[A԰Ÿ{BPgE0ք>++rB(PEPE-%P@ EPEh(((+ioxBK/iH[{^SI+ FBjm1z k(aSOcx m%r1Nh_ ӼkY5>mB(kJUBۗO^̿|7i6ZĖW:| ?*|@'u=[x"oijz|Lzzơww8V= S ~\`z{qQҿf>{}~b,֣"ӝ2g vMDx3UG7zQ KRHʧ2w^:J./d?29s'ހ8^|' 3|97 Ѡx˒p@9ȯ8ҠG'Di\ -R}>y BU-c(G$s\wcMuR}>id^\vʰ >׋nwKYY= 1`A,{ UoB砤nþ%{5o4;_`gbY>|ܗ+[ ;KqT!7CY e1>eoJ..č?vN(#1EbO#Eu|9N]|4w'I$;rRSR]W|d9B}y!~VVg[Bc%#^di"y\ -Mik|UKNnYfhvmQ^WpW4tⶑqe}Ђ`Lq\2%ĄljލV߳OC PN=>x~(;ePŔC=5 j6|9BNyyZY<ϝ߹Z7MDd43()P`##5%HvTR*>V5HFn+FpOZ1|h#SL^iUcL6/~żxUҎ"0N\&CkkI A9GZE^N9950lt[i}jwYXŻB ( ( (J(y((((PEI^< U5ck[sɜ}y+ZԡďݝgW7 [kx,E@02zragzZ7'>-i Z͕zlayfCeQu~(|GJ-- =S@O@ tMUe< =߅ &EecՍn?3Ylŏ2}fOF镾ꜳa⢯̽_VWZ,-\n}OJ<3Kι '+pO'^U#3fR敆ݕ1 ( (i(J(((((()h(+#bFdY՗,*8_O4)%ʸ$uީx떨uSh,VnĀOpQ3Edzռ5g,Z77 kolXL0aq NR-C{Shf sońmyTpF{^ih}7իcH,3KW3 l'ebi ~A9]?Z|Ftۿ\vE :!c?1nnrqҁuOi%;l s%aT9"⧊%N;4Y%KP`3ǭ {H`lcӮ5[ƹ]"| nfj|gy| RKKs=T(m?̗^t0$_[yOsӞ wU>%o<-[8QcPۈevUbzGYůjZ}:ܚDv YWlGJði)(WVkwbO!G\تkقkVSʪ.v7`$$x"V[=]cٮ^A -J) 6|yf{y1W!CŖ@lG"9i#Z$vH'+%T$'*Y@؊E's(7k>ֳ A鏕 шЀy#ק\m63ÖI 0c[֮a]xɝ7h֬=Dgb͙>|:$% &}^x;cҔ6h9:O0=sZ64f\ xׄ#a[cQ7e-y6 ?hJhKJ&sG]dm,qdڴzƥfPEPK@ EPEPEPEPEPEPEPEPK@ \ī3yI|#d8ۆ5ezr^F]EqZ5 :b#WEEC! rf^M96^jSP|t uig<'ːwkRh>hx#&zVe[5m #Jcaw1{Mru|?)4#7'iЊA\iw~MkKVvRdsdל\|OvO~)=b#I" 3&浟:|^Z-)irHNN;H.^KŽyN@q2\hCҵFN%w#S]^ x'dr^!biYkUHC&OҼDm/cWQյe![I >״ZL9Ip1XӇVvcQORI0rB|ctSӭM9#*JuW:W2I*UEm=huG%aqKz\'c0z5RTY-)%RG8QB8#/#2u ]yAK*/b: WKCAYumkk$ 4`QW#4fN]) a{zR\ZdB_hzo4qo 42rVoYZCl;2JM֓l[<@YSK_V7=QE(f|f&&ZwQE9wݏ•/y"Q˂7֮YK1!diЙY/Z:J39 y`~nK`$f\Q mM#XZ|ؿ&? W֨5#4*6A7g:v (ZMԇxMu9c~$xkYNH V1U1[r$\>eeKI߶h՛?^.-Me iM9<ÊF;?|W&QV5_xvh}I=1Tm_z7~ I/ɖU>ku4C2USu{N<1o @V?yEu wR-ەeEPF~=*qTՍOPaErN5\Ww2fsp)+j\"+.D#}d R+6ʣ>;-7-&(cT/43ezЉ;3iz$ogi$F1);C@Cq]'ed (/ lm9ʽ|5]XM%ѭcJ*/^Vri0ZIq4Йn*Lg&Cbw5SOHGHLkU zՆm=43m[VsNɳѾ#KEl362y6.`Q>2 <GVVj$<?NTfͩ>Y'k_kKԹ25lgSڻM;EĈhDpJJ6k궺L?μZ3_ؒ3.p޵ge Jr3xO.2`mrw1֬.mC#I8zRpԏ6\_ncޡW1C0 vΠs}t7+tFVu_I$,^ p=*(n1 ,w|'QVWdA8ж '*, ƣi&`vȭӋi3*D|=K5嬑Ft*w)s^+]V{&0nۈo0+1PҎ8tK:+t'5'}Y_EdzW0g{hFЌ!sN?9'LObhcW+MWhoKtz_5Y7h5-rfrQG;כ(k5h&rzu[.y k[mpU|Et*03&n.4MVZ:}/%_>tċ^48czZ;H4RX#&!$v3!(Lb[./^3Tco&"l1W'~-b! &3WxJ1v%NsUCB ((((((bayestestR/man/figures/unnamed-chunk-15-1.png0000644000175000017500000010527014133140641020576 0ustar nileshnileshPNG  IHDR `gePLTE:f:::f:f333::::::::::f:f::ff:f:f:::MMMMMnMMMnnMnMnMff:f::f:fff:ffffffffffnMMnnMnnnnnnnnnnMMnMnnȎ:f:ffffېnMnȫff:ff:f۶۶ȎMȎnȫې:ېf۶f۶۶۶۶nȎȫcfȎې۶pE pHYs.#.#x?v IDATx$Y}nFt/A4id/^=X6 4A U7*3?||vQ8ʼ ͽJ6H`6H`6H`6H`6̨ǟ?ο?`zխ___Nm [ƿ__ }eտ6i*T_~{ͭڛn A-^y]^s ؛;+ԛGn"$ؿԿ%߻W{ZR`_kCⶑGt؛g]=ִݼzroll`ob w*k6w=D~-hMm9 1jkC߻Mֵ١%2w{6-v4뻞"rA`^n*?l8L;47o^>MqvhLk;덗R_%.9i43okwd_&9iƴ3zm="?w%!MuKm yqj lR=פmo7?县&iC7:佋K/-HiC߻7JxF3 @LK }ǭ? zǮ~Ǐ?~P @HP @HOп*ҟ k5;T`3Αv8U͝Yr0Qk p;`#Lk p;`Mk p;݁u-WC6mk p*n8%6AjvP py-١YZaLSC6%sf l 3[/̚ n5;T`%6]jv8@_+ljvZ6^5;T`3 ث,9igZbQsgôӢ}v3KaeVܙ%0ZaRC605Zbܮf l`l+MVk p* 5;T`[76u5;T`Z6u5;T`Z655;T` f l`PWP ) *!Za\VC60\,9id_+l 5wf8L;/dZaTsgôs. 
;F1hD`m}&}PC60tY>M١J/@ jvK҇5;T`#IGWC60tR_>q5;T`H VC60tN"}HjvFۤ @X(1}AȪ١D:wH*i[:wI;`}l)}`j̒q3z3KaG} ,9#zȩ١@:><15;T`NRC6P_>@)5;T`y!١.ϓ@F@qt&}2jvKDQC6P[:'K(*|١(-]H**|X_،ô(J.ܙ%0I `u5wf8Lx|X[͝Yr0>t.!}VVsg@UX>Jf lt+%}UC6PT:>l١)GK85P 㥏jvJJg ҇`E5;T`#c*ґ|XO@AD>Mf lt"(}VSC6PO:O>~١''K@P8L0y|XK͝Yr0>t }VRsgô"dzHDuܙ%0H,`5wfN㙤#*jvjIl` 5;T`x6 *RY<XA@%(SXf lt*}0WC6PH:g>١($]3KNP ԑ٥(jvH҇`a5;T`ekxC **1AXV،ôNe*j̒q)aXT͝Yr0^[:>K3KakKwb`I5wf-jvJHW҇`A5;T`#xQ * XN@^X,f l^Z,f l^\,f l]\,f l{]A,f l{]A,f lw]E ,f lw]E ,f laڋJJ҇`5wf8L{Q]I0,,9i)kIgEܙ%05w5 ;5jv5jvzU6jvznU6jv:Nޕ7jv:.ޕ7jvե8jv8jv݀!Y@ұ>3١U:v#`^5;T`JnF̫f laڋInH̪,9i%)0;`tƤs3KK JzP (Ic0*#7*}SC6Сtf>|jv:NܰM@ҁ>١O:p'`.5;T`Im^ ̥f l;m@̤f l7mA̤f l7mB$̣f l3mC,̣f laڋHm#ҧ`5wf8L{ mE<̢,9i!H9ܙ%0%30;vjvzږ jvzږ jv:nڶjv:Nڶjv.ƤOjv.֤jv֤jvjvjvz jvz jvfwmR,9i\:e۔>+3Ka;NFO Ij̒qCUp;fO )jvfO )jv؆O jv؆O jvzئOjvznئOjv:NƥOjv:.ƥOjvڗOjvڗOjvjvOjvf[z@,9iU:^>Iǩ3Ka{n.Oqj̒qNӵp;.NOQjvږnO1jvږnO1jv֎Ojv֎OjvZ֞jvZ֮OjvZn֮OjvNΤOjv.ΤOjvڕޤjvڕOjvf;\O,9iN:W>c3Ka{j̒qޤcGsp;n.Ojvn>ajvN>ajvڔ.^Ajvڔ^AjvڔnO!jv~!jv~!jvԎOjvZԞjvZԮOt5;T`-J7j'`*iH:Q>{ܙ%0I'jҧ`;`#B\LVsgô#K@j̒hOO>SP 4'O!D5;T`IiS0Q@suZ@LTC6Кt>P &ݦ%O"45;T`Ii0I@ceZD4LRC6ЖtV>P %eO$5;T`mIwi 0E@SYZHTLPC60]HWi!S 0A͝Yr0ރt>w3Ka{nR'n5wf8L{MZJdܭ,9S@CEZLtܩf l -'}BRC6Ўt>w١hF:GIQP 4#O)jvтҧ5;T`HhEs p*VcIدf l)}V١hD:EkJUjvNѢҧ`*io[:DJWj̒qCyث,9ioZCJY}j̒q3ا,9+اf l ,}n١hA:B+K[=jvng`*حf l-.}zv١Khu S@^:@K_jvZ^RC6g`*t~ }v١Kҧ`*ioU:>G>;ܙ%0J'v5wf8L{9I]͝Yr0ިtz"}nUsg@X<>١K ҧV5;T`YFDܦf l +ݝHhP Ds S p*tu$}nQC6΁O5-jv9f l )ݜCIljv9f l (Injv9f laۓO8u5wf8L{{ҽ9 ,9ioO7>ܙ%0Iҧ;bҵ9)f l %CJtjvRҭ9If l %ݚCJtjvBҩ9if l $]Jv+jv2ҡ9yf l #Jxjv2ҝ9f l "Jyjv"ҙ9f l "]KzKjvf%Y\Rsgô$CK| 5wf8L{C҉9Psgô7$cK} 5wfspRҁ9xf l`}]TC6t_/=/P .ݗ'*եPC6t^VP -l5;T`kK%hFq?+l5;T`+K%)8SC6tZ*ioD,JV͝Yr0ކtXr&=[5wf8L{aɹ lܙ%0mHw%҃QsgY Iب١XU:+y)= 6 UKQx&NJ.I8])$= t0l+ N.JH8U("=D@P 'ݓ\*{kPC60qHë3Ka5 Wsgôc3 ,9iOK$H0;֒nIn `t5;T`+I$J0*K[\J%ɭc f l`d`c١XG#!=jvVHvJ0*U+ң f l`dhC١XC:"#=jv֐nHH0*}f laڃ ^Vsgô  ,9iI$X͝Yr0ӞH`\5wf,/ݏ!= jvG`\5;T`K#wIO0*ť;GV3 f l`ixnUC6t<2AzHQP ,,L`T5;T` K#SU$1FUC6t92MzNAP ,+L`P5;T`3ӞG&J 0;`="L`L5wf8L{D:*=)j̒qt62YzT!ܙ%t42]zV!P ,)L`H5;T`KJG#H 0*%Qo=_%;NF`Dmv{;|E`ad qdzw;A ^ l ,&=/ʞvD`I#J 0:wl`9^@Zӟ==w+XL:9Xzd4֡?\]Ǐg6t-riCï~3{߿،ô.],=2pۙ?>~`}mXFO?ߝ_g$0kK"GH 0&w杁}Eן'{HF!זnE`4M;{?PD`K"GI 0&;tg`o=u4W("ťKL37oҥQc 6/>wH"GJ0&;tZ`ogxD`,Mv~ #W l`LXdN _ l`]Lhd lMJhdEhR:9Azx4١{ܐ?tܙ%0sKKPN͝Yr0>t챾ܙ%0sKKPN͝YrGJ ١8R:HHOPMҩGBzjjvN="cSC6pt葑;*CP 'zf l8#$=x@-5;T`GIg)j١8J:IPJQҕGLzRjvtфEܙ%0HmH!PD͝Yr0>tш 5ܙ%0HwH"PC͝YrIgHO"PCAYG3ңPC6ptь(%P "u#=@ 5;T`HG I#PA!MGCTPC6ptђ4P  t4%=@5;T`HMI#P@t頣-y ١.t4&=@jvKI$п*i?Y:hMz"ܙ%0'KI$н;`d隣9Wsgô*s'=@j̒*s'=@jvJ J%л*)GS f l`tѢT١(r(=@jvImJ%й*i!Gs tf l`tѦ\١$q4*=@jv&IwJO&з*IGғ f l`tѪd}١qS+vgZ͝Yr0~tѮl]3KaO8ڕMk5wf8L)G ,9 G f l`tѲt=١ p4-=@jv&HMK'б*XG f ln~qUC6ptѺ١S:h^zDnP )]o4/=@jv7ڗQ[5;T`wIH)Ы*i?V@zH^ܙ%0GJ=HO)Ы;`Ht 1:Usgô)]nt!=@j̒/n!=@jvKH*Ч*F'҃ f l`tы}١+mt#=@jvJWH*Х*}F?ҳ tf l`tё=١'lt$=@jvH'=IO+У*=FW tf l`tѕ١qåДb1/8LFg gk>?ɜ$0KI,П}|38LҹFo gr`?ʗzUDr;sG!\A`;kGδ/۱6K:OzfLЧ?nvl 쒎5:Z7t>x؍ߎ-]ҭFC /Q÷c l`tѣ9CnTK:sL^[E6CRzl١?WߟA`K}J-Й#;~NG%n5:\/t:(of\ҝF҃ }'?g;n4z\/uKw?7t،ô$i*=@_ٙw%Wh'ô$it+=@W&?{ҷ>DSô"i+=@W̗x-KxqC#~gʔ.n כ?@ґFҳ t~?wмtѳ=ػߊ{zNnN4z^'S{ ?|w3 "h,=@O&֐v ltѵ=١)]h-=@GN{w ܔ4_#nyG7÷yu[ ҁF tdo>纽__ >w !Aޥ'= t$g.=@?tܻwSO=@?yF# tc_s.9#i*g/=@7OvW`<`}tѿ ػ3G_fOy(8LT8ؿ3;_yv_.9i*fb?Ee&fTbB:(!=@'60tQBzNL}KY ltQCzNP \3H2Ї*]FAPC6pU("=@vwG?|/~ݣF(+YFQ'_>;dZ}HWeG2H2 ԗ2 I3ЃO|/~ٟG`B f laڧH7A͝Yr0>A:(%=@j̒q EF)q:PsgôO.2JI3Ё;.b {ˍ>@dK`=F17Cg?}؇:.{b oZ>b}c;ycv 1IO4оIkb\vװ6R:('=@&uUOŏ?7oWpGKH͛ҡ/ŏ{Ci^Hghޔ}ӫ⮑ ltQPzMз^F"g_n|{D6B((=@&t+o\x~c뎟ha8N1*JO5к RTͯ?%?6p.bkq߹ t ]bkq[/#}tQSzۇj`C 
L~`m%dz{/߂]D`Ci/almSӷ};/ߘ~~5s!ʹ0JO6ж/\^>}/nnFY6U鏾p3DCD`gFY6C^>|l l#ҳ 4mZ2?{[}QΤ# lbʷ[$tQXzMÇ_}ޣ/f g%hqcL`6J7а*gEhX؀fYVC6 YVzP lp];w߾o}5 KO8Ю;c?XK`3ӾS:.=@ߚLN횴36ę]E}5eg~}-!δ/K8Ь);ۘzѯ4$9t|Q_zƁfMз6?ʌ6 /^ =@&tW$s0t{1С6 ,[&=@{&wӟ|߻/N$a`b83C?˼E?I6 ,][ '=@s&v;Wo,N a\b<3C?km`?Z/J0tl1ԡ<g~γ'߼;q[h͔^r2浖o،ô_N-Fz5Sv }.N 9i&Z )=@c\R`o="qkҥŐc4fμC!{ObOv 8LUbL3agT֗ϭ0th1m%-;Ah IgJO>Жo6&V^@#v_Ѥ 6hľ}qnއ-a4鰂:C} } lLLz%mӡ ػv6 &Up&6^~廰60[鬂3akۭ#"9iHWK/ {v淞۟Ǜ~u4 8LFZ{g ߄-9iHG^ @7_s@ Q/Єk}wy .W t/Tp!l{餂 @`K'\^@6лtPe4@`K\^@6лtP lw鞂+ @9WW'Υs I/ n`{N'k I/ 4Ӟ).&8 }3阂ҋH7Ӟn)!(4 }~)7WVsg>9`锂[VC6 #]RpjvQC n^@X0tH Ȫ١F(UzaY5;T` K f lD:`jvA+ vH/ f lC:`jv1# vJ/ f lC`jv! vK f l1 H/ ]2j߫ҽƖN%8\zkۡןYC MR.B`CYT# ~{r;/PVe,hg`opw6%8Fz++7{w!"t(Q XW__g槿/ l+Ip5C?|ݟ/A.]\^~il U3 ^9݁} ;L`CYL#&;tO`?퟽?ҕGJ/`UMv~Ǐ?~PS:Xjw~Ç~=Z،ciOG-x5 tU-r5F& }jӉK`M6Դ N^>vO۝y{JGf l(HpVTC6T$8Iz١*J$P N^Azjvy'J/!`55;T`CA:JWkI*]hXK:dEE`}H,l 4JW+I P8t XI͝ya=F0BQsg%9`iOw"uܙGIg]H/$`5wQ"Iz)١IW$UU٫w4ݫҁUf^J*hE2_9E3QI/&` h_`6A`K''5U٫w4ݫҁf^N jvRE3J/'`5;T`C% 9*t X^PI`V,f l($C0wH~Oo⛍><%8t+ Xw\z+_^ClsL{:`f%,n_/ǿ:O{:`n5,nk~SOxc^So3ѓ'O>h'#C07C9)'ƾ]"H/MЏ^qw6DOPF:`~U,mJ ۻFX#@`CW)m{|uO4 UK^V&t6o^~{D6T.!XBz] С[?n*!KH+`a_7A`CEl !A5K7"KH,`YSw댷gw6 XHziҡLj\vPD:`),jRnbZa0 IDATo~UW{KI-`Qvma?޾)ȪpYiOG,&%ޙ~Ʌ/l6{?ŷ~_>Gvre=@{g<.i=@$f^^6Ьt XО~ l`1%*t XNPA`Q,f l( ?SC6XXz١ H,,ĀŜԡ/oB1eg~yWo>w^EUn:U ;W?mu:Ma֑^i2?x!CD6.=R1C7)}~7ȥXB`C+I/5`Zٗoi֒^k" ˗7|}Sl[:z`5,xH7a l[y`5,a_x} ,$}еt XTucDήfuϗ%֔^o&u覥ϟ"?xLt ߤ^>^}+_8K*M۫ZCD`3ӞXWz3{wayLءധsV^r~yO,Ou;tK;/nw_ng%v7؁0b;zH.耹P J.耹P J0*[ҁfvH~O>o6- J0.b//N&WЁ5C>ٷ<Яt@Dz֡^x~SlUs "yMз^ɓ'4_:I/=`V:B|Om%"Sʁf5C?z>MD`Cҕ!jJ ۻFX#@`Cґ1iJu_]A`3ZӞnI/>`NvmJ߼V~JاԴrҫӄys ~p=BdNhHz93ҡ c/a lJh!`Jn\G5=G =I $LзnMwGD`COAMI/Ht:nds'}݌{^&tfwu6t$3ИN&o=]3ИN61w""!k^&ރ}[7owt6?阁%pOۆ-I/JTSv͵oe+OGe9E jJn)s.[[`C?) J/KD:tN{tqn9 lGdAe hRn#^)tӯ Їt@ҋ8ѯJo>^)&t53;Vqt=鄁'p ;-3GQz K/O6ަ䀃t=邁'p yMK/PxSvwo.Isr8MK/Pxwg =H 4-@Mmail H 4.DM{xţF(zƥ(p)yZ}Z^6h^z@F]yE kSDd/6G K 4/HcP K t L#P K t L#P K t L#P K t!PLwc^q:n.*pv`@Cnkrzt@ 8w.y}+}pN=-ЉRwgW^+n.bwpN=]-ЉRog~k/n|uOuNƑDzGס  >i6--ЋZC?zmoۻE>M"ږFzӡo yEst: mK7 t#Xcw^`Ϯf id+p=ֵk.HzGݡ{B~?7a lhZX# awn\ ,,Еw@`E6,+Е'{^6t@gK8VLzз=]+Й&oM{:V;E J`Ct@wҋ8u7TW-p( **С$UK:^6t@ 8 w>j6*)ХsG`ߡ ZRzҙJ/] XQRSwϒwg,Ճbd4^BElfc,iBD ԻYfW{*S=WV՝B'tfVNp ')XFЧtbo^5;T`CҍykP ]J' ,XPC6t)](`Bجǒt_ Y,jO ,Z[|%%gIW{O`700bV,)9> y і2_gA둮X- VC6t(]'p[f lO:N`710VПtob`*;4ҷ10RНt@f lNLm TC6&&PBFƩ١z(!}#P Ig ԐqjvΤH(5;T`CgUEoe`*Y\*"ҷ20V-$9e\(2730V,#9e\&2730V,#9e\&2730V,#9`5Iog`*+"Bҷ30BЕt@!f lI:H \VC6$#PJ.١z(%}CP IԒjvsIE5;T`CG5Ťoi࢚*r75pIЏt@9f lF:Em \PC6՞.((}[2ߪf%okW[uLW{:D |ޓjOw^o{rj3JY5;T`C'Eomଚ*6pVЇt@Y8f lCA SC6t! PXΨ١.(,}{gP ]H3jvJKi5;T`Copച*ҷ8pRЁt}@q[8f l֣=]P\NueO{K)w79~t{@y8ӕN&՞N/}2߫H@6N١+́jvtysบ*!.8f lKBF١сjvtw*otਚ*!.;8f lKgCN١+Ձ#jvtuJou*!.][8f l֣۫=]{8|njOGE^2߫溽kׁC}69MGF2arzlUP AfC5;T`C5;T`Cy@ؐ#af{8PC6|,anWC6l]P 1f}5;T`Cf}5;T`C}١R6{`O١j]=t06d}`GG+sC}%L]`CHvt27Wr6}`GG+sC}%Rw?f lm5;T`Cr?f lH-5;T`CRC6l OU !*<W5;T`>ؐ*a~3xQC6c i)<١f'!.<g5;T`6ąf l֣c y9: z5;T`L`ի١f"KkWC6D`C“֮f lѾ v5;T`<6t*<`jvylTx6P 8Vf ln[0 OX*a'Z`CVf lZجGj? 
qlX~6!3 K'2NNW09 O X*ajgZ`C'c֫f l X*aj <'`jvk O X*ab!<)`jvik  X*aZ"<+`jvi lX𬀕١&u6t$<-`jvI lXu١H\Z`COYs@`K}-'3A` l༙' lXĀ5١s6%<3`jvlX̀١&s6t&<5`}jvlXԀ١26&<7`ujvlX܀թ١&26t'<9`mjvlX䀵١16t'<:`mjvilX쀕١!a³Vf lc֫}\_ llsذtc9lX9}-Cs A` l` 6,Xx~P  &5;T`6,ZxP k ] X*= !"5;T`C{.[ ;گk ~\'[[|xlC`W~\?ѧ7[o$YYZ`C&Cg Ɨ?|潿8ő lC`ט|^޴?;{64%4uC7x^#ҕ}-gySnޣo'?M]`6)=u~oOhE"&p}_ l\x@}=u>{!GhF`C9ԡW{$Z`CcC/ȮlhE`CAԡ{駸64rK_ l]x@y=ucd.| _Jpj}&_jq'~񉗈WM}-{M ࡳ)Wdљجn0: M@F㟔ԛuQj6ol+~ׯOpwЀ*CO/w_d;o?64 jv!o_ l(l%Pzgk M>ՠ*Y jP [k ',AjPZxP 6TxP /k ,@B`f lxk Ņf lx&s WC6\ج՞6MbAjvf=6}е*Yt\o ;^tf lV+փaO >0Pjz0Izf|Л*YtZ?v%s >LجU: ^9gtf lV*]O}I/RC6+.'þW{`V}١uJagҫ=0P*Ű7WxBWjvf]b؛b,<'5;T`F~5NzБ*YtUo' OAGج՞-Wz`vY]١xQmرJ/: #5;T`OW{w ;^聀4VzR3:PC6+#J/@Jx(BjvfE5}İ_% OEȫ١H1ÎWx '<!f lV#G {^d*YtK5Yz}“jvf-)}ܰkH F١,f7W{: ^ށt*YM lGY5;T`@jvf@jvf}Ұw OHH١5Hgiv -<#!f l ѧ ^ځ*YtE1_ziSrjvtD3`zecrjvtD3`zesbjvtC5aza:RC6եa:Ѓ𬄐*).] ^ց.%dPMm逾d!5;T`SO:1pz%0*)'W8 RMؔn {^ȁ')̥f l9}tg:(z} XTؔ {^΅',Lf l*HgO@3&TC6+n{) ө١}Xp_xdjvfFǬ+WckPM&>ѷp&< 5;T`ӟbV`k0^tgj9VC6.f6"Y#PMg;0PRx85;T`ӗtm8* 0J$;EG:QC6Inp5(+<Ლ*G:vO@]PM7ҭ1yz Ovf l:.ݐ/PYx5;T`Ӈt ^~Ϋ١.;7f8 pV 9_sjv&/I H@u)gPM\q3^zƒN١bvs3 @x)5;T`6c l`N١9cV`kp\DXćcjv&)yiHZg>QC6ApK.jv&&ݶ]Dz#<@ؤӶéH/g?١tb8X5;T`~ #+^`[#HڄRC6KGmO^j /f lfnڮ g$^E,]IzV(*W:h{3: Rx9G5;T`3tvg8+UX5;T`3pn.Kz*({cV` .GVf l p(5;T`3=-~D֦f l&2ڿЍPM{96W{zm0~DWVf lZ۞|%F$Hش;K`]LX*ii4uFtAa-jv#9;vz]UX*iXNk:nD֠f l9:ڿѵjv#9;u]^(f l89ڿ5jv^q:_wt#PY0NkΟC~D١\|ߥЏzCU5;T`s8|]r(f ln6bc$@7%PmMtoEt*>݈.@TSC6?ksa'EZjvZ׌tDW" ١\9{K`4](f lpM. VtM*떱tj]f lƹm7\nD'f lFy_^DW(f l.canD)f l.kڦcnD*f lκs֦e~D+f lNЦ N4@7TC6|_S ЏPQfl:_lt#|85;T`sD ;݈.a,Lk9_ڿuE١mk:_t#5;T``M՞އ}x~DW5f lL2UK`.5;T`3Ums:_w l"Gjvfq] %ѳ*WnaM("ѯ*WlQHt#ѥ*i)pt#қ*h# ҕ*Wgt#ґ*dٙ^DD:QCZdf:_zp]ɫ١{#3K>B݈.DP][zZ *+t E*JOI=J@/+' 5;T`͎<^݈.̬f Bqfksa_@7*١݈.Wl|{o]#9Sc:_Kхc?~cP_]zfs|ߺ nDS-]u?W7ŵ<3H͡kB̈́Cn'& )D6g"s'kSn^ S[!ԡo?5^&BFpLO)/7~ `b"d'i~ @.n!#8Iah׿ɷ7DN"y+g_OOY"]zضZ`F`ö/om ,ҥ'lض{axCYK3^L^k ,҅M`O`;/P{U&H6 p? gl ?qvi&n%O"?ϜE l[ l,3|,ҘM`J`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА6@S_ տKմ>p:&1 a`oʟ~!WO7G1_`el;{vb^Ɍ|?6PhT`x EeƱ/@-NIDATP=[:p ":S5 Sq@5/x|H *OFd(˯@-:h̛i 6Gc~Q`lFU`+ỏ8j ":/<~ C~w>ӿϾ|0Ok|?~ ¿|$oy|^}+?}H  Ԙl߻y}Οx|KC'||[s_𥍏nyb׶}}ӷN@?6@S#gK+vvh+_zzvpr,oy|vypЯt ԙ`?O9܉W;{z!/O~Ξ/?vrˣvw;;qhx`oO[n~`}K~/Emc/gſܼ+2fԖvm[ag=ӓCnNJ??/~K8<ص}o=O>M ǧUݡs?&˿+&~pʷ#/8?7h/=챁}n!:ҟ  r6{ߎGӦۯ3yNԳ[}{1:쑁}v!z~Ɓ/7`/=PuW?`/:/#[^,ʽn{d`?~lN;G?~Gom1{/xb}809O={JvHhx`G|/ ݍw_dk`pҎvsr`ハd؟Sw#C`4u~_'rV`{MG{>]۲'^}h`Pc{j㽏8<ا^ ީ-GkD<|hr`#d`޸~|'ǽn拇-Z6@S{տ!ǝ^~}+uK?~l.2w  oq'/I杭~`oͽ!l.L‰>>~owrQq<{{/6@/6@S{av?q.n<6?/|8&gϟx \`=<M]'n  :UgOot{?x  Ԙs!Om<6/>mMm9.|쭷{:Mz K$>9`xt`o>χ*}<~|/_qqs Ըw[}ц'*ƣ1zyٯ򒿛}֙-G?< 7Χ?C%?݁[ o/W?>3G ~;-|Pg6"}3vrl`?}Ã3pw?W}gُv̑tC`4u1^:j;/+g|bڅ-~ 쇗WWgˁY$_~O^?@XŞ~x&c[_߻~|WF<ӯm?zwrtB`@CА64$! lhH`@CА64$! lhH`@CА64$! lhH`@CА64$! 
bayestestR/man/dot-select_nums.Rd0000644000175000017500000000040013636776614016715 0ustar nileshnilesh
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.select_nums}
\alias{.select_nums}
\title{select numerics columns}
\usage{
.select_nums(x)
}
\description{
select numerics columns
}
\keyword{internal}
bayestestR/man/distribution.Rd0000644000175000017500000000732314133142462016317 0ustar nileshnilesh
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution.R
\name{distribution}
\alias{distribution}
\alias{distribution_custom}
\alias{distribution_beta}
\alias{distribution_binomial}
\alias{distribution_binom}
\alias{distribution_cauchy}
\alias{distribution_chisquared}
\alias{distribution_chisq}
\alias{distribution_gamma}
\alias{distribution_mixture_normal}
\alias{distribution_normal}
\alias{distribution_gaussian}
\alias{distribution_nbinom}
\alias{distribution_poisson}
\alias{distribution_student}
\alias{distribution_t}
\alias{distribution_student_t}
\alias{distribution_tweedie}
\alias{distribution_uniform}
\alias{rnorm_perfect}
\title{Empirical Distributions}
\usage{
distribution(type = "normal", ...)

distribution_custom(n, type = "norm", ..., random = FALSE)

distribution_beta(n, shape1, shape2, ncp = 0, random = FALSE, ...)

distribution_binomial(n, size = 1, prob = 0.5, random = FALSE, ...)

distribution_binom(n, size = 1, prob = 0.5, random = FALSE, ...)

distribution_cauchy(n, location = 0, scale = 1, random = FALSE, ...)

distribution_chisquared(n, df, ncp = 0, random = FALSE, ...)

distribution_chisq(n, df, ncp = 0, random = FALSE, ...)

distribution_gamma(n, shape, scale = 1, random = FALSE, ...)

distribution_mixture_normal(n, mean = c(-3, 3), sd = 1, random = FALSE, ...)

distribution_normal(n, mean = 0, sd = 1, random = FALSE, ...)

distribution_gaussian(n, mean = 0, sd = 1, random = FALSE, ...)

distribution_nbinom(n, size, prob, mu, phi, random = FALSE, ...)

distribution_poisson(n, lambda = 1, random = FALSE, ...)

distribution_student(n, df, ncp, random = FALSE, ...)

distribution_t(n, df, ncp, random = FALSE, ...)

distribution_student_t(n, df, ncp, random = FALSE, ...)

distribution_tweedie(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...)

distribution_uniform(n, min = 0, max = 1, random = FALSE, ...)

rnorm_perfect(n, mean = 0, sd = 1)
}
\arguments{
\item{type}{Can be any of the names from base R's \link[stats:Distributions]{Distributions}, like \code{"cauchy"}, \code{"pois"} or \code{"beta"}.}

\item{...}{Arguments passed to or from other methods.}

\item{n}{the number of observations}

\item{random}{Generate near-perfect or random (simple wrappers for the base R \verb{r*} functions) distributions.}

\item{shape1}{non-negative parameters of the Beta distribution.}

\item{shape2}{non-negative parameters of the Beta distribution.}

\item{ncp}{non-centrality parameter.}

\item{size}{number of trials (zero or more).}

\item{prob}{probability of success on each trial.}

\item{location}{location and scale parameters.}

\item{scale}{location and scale parameters.}

\item{df}{degrees of freedom (non-negative, but can be non-integer).}

\item{shape}{shape and scale parameters. Must be positive, \code{scale} strictly.}

\item{mean}{vector of means.}

\item{sd}{vector of standard deviations.}

\item{mu}{the mean}

\item{phi}{Corresponding to \code{glmmTMB}'s implementation of nbinom distribution, where \code{size=mu/phi}.}

\item{lambda}{vector of (non-negative) means.}

\item{xi}{the value of \eqn{\xi}{xi} such that the variance is \eqn{\mbox{var}[Y]=\phi\mu^{\xi}}{var(Y) = phi * mu^xi}}

\item{power}{a synonym for \eqn{\xi}{xi}}

\item{min}{lower and upper limits of the distribution. Must be finite.}

\item{max}{lower and upper limits of the distribution. Must be finite.}
}
\description{
Generate a sequence of n-quantiles, i.e., a sample of size \code{n} with a near-perfect distribution.
}
\examples{
library(bayestestR)
x <- distribution(n = 10)
plot(density(x))

x <- distribution(type = "gamma", n = 100, shape = 2)
plot(density(x))
}
bayestestR/vignettes/0000755000175000017500000000000014135671003014541 5ustar nileshnilesh
bayestestR/vignettes/apa.csl0000644000175000017500000016075013506417057016025 0ustar nileshnilesh
[CSL XML content (APA citation style definition) omitted]
Must be positive, \code{scale} strictly.} \item{mean}{vector of means.} \item{sd}{vector of standard deviations.} \item{mu}{the mean} \item{phi}{Corresponding to \code{glmmTMB}'s implementation of nbinom distribution, where \code{size=mu/phi}.} \item{lambda}{vector of (non-negative) means.} \item{xi}{the value of \eqn{\xi}{xi} such that the variance is \eqn{\mbox{var}[Y]=\phi\mu^{\xi}}{var(Y) = phi * mu^xi}} \item{power}{a synonym for \eqn{\xi}{xi}} \item{min}{lower and upper limits of the distribution. Must be finite.} \item{max}{lower and upper limits of the distribution. Must be finite.} } \description{ Generate a sequence of n-quantiles, i.e., a sample of size \code{n} with a near-perfect distribution. } \examples{ library(bayestestR) x <- distribution(n = 10) plot(density(x)) x <- distribution(type = "gamma", n = 100, shape = 2) plot(density(x)) } bayestestR/vignettes/0000755000175000017500000000000014135671003014541 5ustar nileshnileshbayestestR/vignettes/apa.csl0000644000175000017500000016075013506417057016026 0ustar nileshnilesh bayestestR/vignettes/web_only/0000755000175000017500000000000014124727741016370 5ustar nileshnileshbayestestR/vignettes/web_only/apa.csl0000644000175000017500000016075014034712166017640 0ustar nileshnilesh bayestestR/vignettes/web_only/bibliography.bib0000644000175000017500000003340214034712166021516 0ustar nileshnilesh@book{mcelreath2018statistical, title={Statistical rethinking: A Bayesian course with examples in R and Stan}, author={McElreath, Richard}, year={2018}, publisher={Chapman and Hall/CRC} } @article{wagenmakers2018bayesian, title={Bayesian inference for psychology. Part I: Theoretical advantages and practical ramifications}, author={Wagenmakers, Eric-Jan and Marsman, Maarten and Jamil, Tahira and Ly, Alexander and Verhagen, Josine and Love, Jonathon and Selker, Ravi and Gronau, Quentin F and {\v{S}}m{\'\i}ra, Martin and Epskamp, Sacha and others}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={35--57}, year={2018}, publisher={Springer} } @article{morey2014simple, title={Simple relation between Bayesian order-restricted and point-null hypothesis tests}, author={Morey, Richard D and Wagenmakers, Eric-Jan}, journal={Statistics \& Probability Letters}, volume={92}, pages={121--124}, year={2014}, publisher={Elsevier} } @misc{morey_2015_blog, title={Multiple Comparisons with BayesFactor, Part 2 – order restrictions}, url={http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html}, author={Morey, Richard D}, year={2015}, month={Jan} } @article{benjamin2018redefine, title={Redefine statistical significance}, author={Benjamin, Daniel J and Berger, James O and Johannesson, Magnus and Nosek, Brian A and Wagenmakers, E-J and Berk, Richard and Bollen, Kenneth A and Brembs, Bj{\"o}rn and Brown, Lawrence and Camerer, Colin and others}, journal={Nature Human Behaviour}, volume={2}, number={1}, pages={6}, year={2018}, publisher={Nature Publishing Group} } @article{dienes2018four, title={Four reasons to prefer Bayesian analyses over significance testing}, author={Dienes, Zoltan and Mclatchie, Neil}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={207--218}, year={2018}, publisher={Springer} } @article{lakens2018equivalence, title={Equivalence testing for psychological research: A tutorial}, author={Lakens, Dani{\"e}l and Scheel, Anne M and Isager, Peder M}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918770963}, year={2018}, publisher={SAGE 
Publications Sage CA: Los Angeles, CA}}
@article{etz2018bayesian, title={Bayesian inference and testing any hypothesis you can specify}, author={Etz, Alexander and Haaf, Julia M and Rouder, Jeffrey N and Vandekerckhove, Joachim}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918773087}, year={2018}, publisher={SAGE Publications Sage CA: Los Angeles, CA}}
@article{kruschke2018bayesian, title={The Bayesian New Statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a Bayesian perspective}, author={Kruschke, John K and Liddell, Torrin M}, journal={Psychonomic Bulletin \& Review}, volume={25}, number={1}, pages={178--206}, year={2018}, publisher={Springer}}
@article{wagenmakers2017need, title={The need for Bayesian hypothesis testing in psychological science}, author={Wagenmakers, Eric-Jan and Verhagen, Josine and Ly, Alexander and Matzke, Dora and Steingroever, Helen and Rouder, Jeffrey N and Morey, Richard D}, journal={Psychological science under scrutiny: Recent challenges and proposed solutions}, pages={123--138}, year={2017}, publisher={Wiley New York, NY}}
@article{gronau2017bayesian, title={A Bayesian model-averaged meta-analysis of the power pose effect with informed and default priors: The case of felt power}, author={Gronau, Quentin F and Van Erp, Sara and Heck, Daniel W and Cesario, Joseph and Jonas, Kai J and Wagenmakers, Eric-Jan}, journal={Comprehensive Results in Social Psychology}, volume={2}, number={1}, pages={123--138}, year={2017}, publisher={Taylor \& Francis}}
@article{gronau2017simple, title={A simple method for comparing complex models: Bayesian model comparison for hierarchical multinomial processing tree models using warp-III bridge sampling}, author={Gronau, Quentin F and Wagenmakers, Eric-Jan and Heck, Daniel W and Matzke, Dora}, journal={Psychometrika}, pages={1--24}, year={2017}, publisher={Springer}}
@article{piironen2017comparison, title={Comparison of Bayesian predictive methods for model selection}, author={Piironen, Juho and Vehtari, Aki}, journal={Statistics and Computing}, volume={27}, number={3}, pages={711--735}, year={2017}, publisher={Springer}}
@article{mills2017objective, title={Objective Bayesian Precise Hypothesis Testing}, author={Mills, Jeffrey A}, journal={University of Cincinnati [original version: 2007]}, year={2017}}
@article{szucs2016empirical, title={Empirical assessment of published effect sizes and power in the recent cognitive neuroscience and psychology literature}, author={Szucs, Denes and Ioannidis, John PA}, journal={BioRxiv}, pages={071530}, year={2016}, publisher={Cold Spring Harbor Laboratory}}
@article{wagenmakers2016bayesian, title={Bayesian benefits for the pragmatic researcher}, author={Wagenmakers, Eric-Jan and Morey, Richard D and Lee, Michael D}, journal={Current Directions in Psychological Science}, volume={25}, number={3}, pages={169--176}, year={2016}, publisher={Sage Publications Sage CA: Los Angeles, CA}}
@article{ly2016harold, title={Harold Jeffreys’s default Bayes factor hypothesis tests: Explanation, extension, and application in psychology}, author={Ly, Alexander and Verhagen, Josine and Wagenmakers, Eric-Jan}, journal={Journal of Mathematical Psychology}, volume={72}, pages={19--32}, year={2016}, publisher={Elsevier}}
@article{wasserstein2016asa, title={The ASA’s statement on p-values: context, process, and purpose}, author={Wasserstein, Ronald L and Lazar, Nicole A and others}, journal={The American Statistician}, volume={70}, number={2},
pages={129--133}, year={2016}}
@article{etz2016bayesian, title={A Bayesian perspective on the reproducibility project: Psychology}, author={Etz, Alexander and Vandekerckhove, Joachim}, journal={PloS one}, volume={11}, number={2}, pages={e0149794}, year={2016}, publisher={Public Library of Science}}
@article{burrell2016machine, title={How the machine ‘thinks’: Understanding opacity in machine learning algorithms}, author={Burrell, Jenna}, journal={Big Data \& Society}, volume={3}, number={1}, pages={2053951715622512}, year={2016}, publisher={SAGE Publications Sage UK: London, England}}
@article{castelvecchi2016can, title={Can we open the black box of AI?}, author={Castelvecchi, Davide}, journal={Nature News}, volume={538}, number={7623}, pages={20}, year={2016}}
@incollection{cohen2016earth, title={The earth is round (p < .05)}, author={Cohen, Jacob}, booktitle={What if there were no significance tests?}, pages={69--82}, year={2016}, publisher={Routledge}}
@article{maxwell2015psychology, title={Is psychology suffering from a replication crisis? What does “failure to replicate” really mean?}, author={Maxwell, Scott E and Lau, Michael Y and Howard, George S}, journal={American Psychologist}, volume={70}, number={6}, pages={487}, year={2015}, publisher={American Psychological Association}}
@article{lilienfeld2015fifty, doi={10.3389/fpsyg.2015.01100}, title={Fifty psychological and psychiatric terms to avoid: a list of inaccurate, misleading, misused, ambiguous, and logically confused words and phrases}, author={Lilienfeld, Scott O and Sauvign{\'e}, Katheryn C and Lynn, Steven Jay and Cautin, Robin L and Latzman, Robert D and Waldman, Irwin D}, journal={Frontiers in Psychology}, volume={6}, pages={1100}, year={2015}, publisher={Frontiers}}
@misc{mcelreath2014rethinking, title={rethinking: Statistical Rethinking book package. R package version 1.391}, author={McElreath, R}, year={2014}}
@book{kruschke2014doing, title={Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan}, author={Kruschke, John}, year={2014}, publisher={Academic Press}}
@article{chambers2014instead, title={Instead of 'playing the game' it is time to change the rules: Registered Reports at AIMS Neuroscience and beyond}, author={Chambers, Christopher D and Feredoes, Eva and Muthukumaraswamy, Suresh Daniel and Etchells, Peter}, journal={AIMS Neuroscience}, volume={1}, number={1}, pages={4--17}, year={2014}, publisher={Aims Press}}
@article{dienes2014using, title={Using Bayes to get the most out of non-significant results}, author={Dienes, Zoltan}, journal={Frontiers in psychology}, volume={5}, pages={781}, year={2014}, publisher={Frontiers}}
@article{jarosz2014odds, title={What are the odds?
A practical guide to computing and reporting Bayes factors}, author={Jarosz, Andrew F and Wiley, Jennifer}, journal={The Journal of Problem Solving}, volume={7}, number={1}, pages={2}, year={2014}, publisher={Purdue University Press}}
@incollection{mills2014bayesian, title={Bayesian MCMC estimation}, author={Mills, Jeffrey A and Parent, Olivier}, booktitle={Handbook of Regional Science}, pages={1571--1595}, year={2014}, publisher={Springer}}
@article{andrews2013prior, title={Prior approval: The growth of Bayesian methods in psychology}, author={Andrews, Mark and Baguley, Thom}, journal={British Journal of Mathematical and Statistical Psychology}, volume={66}, number={1}, pages={1--7}, year={2013}, publisher={Wiley Online Library}}
@article{kruschke2012time, title={The time has come: Bayesian methods for data analysis in the organizational sciences}, author={Kruschke, John K and Aguinis, Herman and Joo, Harry}, journal={Organizational Research Methods}, volume={15}, number={4}, pages={722--752}, year={2012}, publisher={Sage Publications Sage CA: Los Angeles, CA}}
@inproceedings{snoek2012practical, title={Practical bayesian optimization of machine learning algorithms}, author={Snoek, Jasper and Larochelle, Hugo and Adams, Ryan P}, booktitle={Advances in neural information processing systems}, pages={2951--2959}, year={2012}}
@article{wagenmakers2010bayesian, title={Bayesian hypothesis testing for psychologists: A tutorial on the Savage--Dickey method}, author={Wagenmakers, Eric-Jan and Lodewyckx, Tom and Kuriyal, Himanshu and Grasman, Raoul}, journal={Cognitive psychology}, volume={60}, number={3}, pages={158--189}, year={2010}, publisher={Elsevier}}
@article{morey2011bayesinterval, title={Bayes factor approaches for testing interval null hypotheses}, author={Morey, Richard D and Rouder, Jeffrey N}, journal={Psychological methods}, volume={16}, number={4}, pages={406}, year={2011}, publisher={American Psychological Association}}
@article{clyde2011bayesian, title={Bayesian adaptive sampling for variable selection and model averaging}, author={Clyde, Merlise A and Ghosh, Joyee and Littman, Michael L}, journal={Journal of Computational and Graphical Statistics}, volume={20}, number={1}, pages={80--101}, year={2011}, publisher={Taylor \& Francis}}
@article{kruschke2010believe, title={What to believe: Bayesian methods for data analysis}, author={Kruschke, John K}, journal={Trends in cognitive sciences}, volume={14}, number={7}, pages={293--300}, year={2010}, publisher={Elsevier}}
@article{wagenmakers2007practical, title={A practical solution to the pervasive problems of p values}, author={Wagenmakers, Eric-Jan}, journal={Psychonomic bulletin \& review}, volume={14}, number={5}, pages={779--804}, year={2007}, publisher={Springer}}
@book{jeffreys1998theory, title={The theory of probability}, author={Jeffreys, Harold}, year={1998}, publisher={OUP Oxford}}
@article{kirk1996practical, title={Practical significance: A concept whose time has come}, author={Kirk, Roger E}, journal={Educational and psychological measurement}, volume={56}, number={5}, pages={746--759}, year={1996}, publisher={Sage Publications Sage CA: Thousand Oaks, CA}}
@article{cohen1988statistical, title={Statistical power analysis for the social sciences}, author={Cohen, Jacob}, year={1988}, publisher={Hillsdale, NJ: Erlbaum}}
@article{rouder2012default, title={Default Bayes factors for ANOVA designs}, author={Rouder, Jeffrey N and Morey, Richard D and Speckman, Paul L and Province, Jordan M}, journal={Journal of
Mathematical Psychology}, volume={56}, number={5}, pages={356--374}, year={2012}, publisher={Elsevier}}
@article{wagenmakers2018SI, title={The Support Interval}, author={Wagenmakers, Eric-Jan and Gronau, Quentin Frederik and Dablander, Fabian and Etz, Alexander}, year={2018}, publisher={PsyArXiv}, url={https://psyarxiv.com/zwnxb/}, doi={10.31234/osf.io/zwnxb}}
@article{rouder2018bayesian, title={Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors}, author={Rouder, Jeffrey N and Haaf, Julia M and Vandekerckhove, Joachim}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={102--113}, year={2018}, publisher={Springer}}
@article{van2019cautionary, title={A cautionary note on estimating effect size}, author={van den Bergh, Don and Haaf, Julia M and Ly, Alexander and Rouder, Jeffrey N and Wagenmakers, Eric-Jan}, year={2019}, publisher={PsyArXiv}}
bayestestR/vignettes/web_only/indicesEstimationComparison.Rmd0000644000175000017500000003005414034712166024537 0ustar nileshnilesh
---
title: "In-Depth 1: Comparison of Point-Estimates"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{In-Depth 1: Comparison of Point-Estimates}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)

if (!requireNamespace("see", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("parameters", quietly = TRUE) ||
  !requireNamespace("stringr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("tidyr", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  echo = TRUE,
  comment = ">",
  out.width = "100%",
  message = FALSE,
  warning = FALSE,
  dpi = 150
)
options(digits = 2)
```

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

# Effect Point-Estimates in the Bayesian Framework

## Introduction

One of the main differences between the Bayesian and the frequentist frameworks is that the former returns a probability *distribution* for each effect (*i.e.*, a model parameter of interest, such as a regression slope) instead of a *single value*. However, there is still a need and demand - for reporting or use in further analysis - for a single value (**point-estimate**) that best characterises the underlying posterior distribution.

There are three main indices used in the literature for effect estimation:

- the **mean**
- the **median**
- the **MAP** (Maximum A Posteriori) estimate (roughly corresponding to the mode - the "peak" - of the distribution)

Unfortunately, there is no consensus about which one to use, as no systematic comparison has ever been done. In the present work, we will compare these three point-estimates of effect with each other, as well as with the widely known **beta**, extracted from a comparable frequentist model. These comparisons can help us draw bridges and relationships between these two influential statistical frameworks.
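To make the three indices concrete before diving into the simulations, here is a minimal illustration (an addition, not part of the original study code) using `bayestestR`'s `point_estimate()` function on a simulated posterior; the parameter values are arbitrary.

```{r}
library(bayestestR)

# A near-perfect normal "posterior" centred on 0.4 (arbitrary values,
# chosen only to illustrate the three centrality indices)
posterior <- distribution_normal(n = 1000, mean = 0.4, sd = 0.2)

# The three point-estimates compared throughout this vignette
point_estimate(posterior, centrality = c("median", "mean", "MAP"))
```

For a symmetric posterior such as this one, the three values virtually coincide; they diverge for skewed posteriors, which is precisely the behaviour the simulations below probe.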
## Experiment 1: Relationship with Error (Noise) and Sample Size

### Methods

We will be carrying out simulations aimed at modulating the following characteristics:

- **Model type**: linear or logistic.
- **"True" effect** (*known* parameter values from which data is drawn): can be 1 or 0 (no effect).
- **Sample size**: from 20 to 100 by steps of 10.
- **Error**: Gaussian noise applied to the predictor with SD uniformly spread between 0.33 and 6.66 (with 1000 different values).

We generated a dataset for each combination of these characteristics, resulting in a total of `2 * 2 * 9 * 1000 = 36000` Bayesian and frequentist models. The code used for generation is available [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it usually takes several days or weeks to complete).

```{r message=FALSE, warning=FALSE}
library(ggplot2)
library(dplyr)
library(tidyr)
library(stringr)
library(see)
library(parameters)

df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study1.csv")
```

### Results

#### Sensitivity to Noise

```{r, message=FALSE, warning=FALSE}
df %>%
  select(error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>%
  gather(estimate, value, -error, -true_effect, -outcome_type) %>%
  mutate(temp = as.factor(cut(error, 10, labels = FALSE))) %>%
  group_by(temp) %>%
  mutate(error_group = round(mean(error), 1)) %>%
  ungroup() %>%
  filter(value < 6) %>%
  ggplot(aes(x = error_group, y = value, fill = estimate, group = interaction(estimate, error_group))) +
  geom_boxplot(outlier.shape = NA) +
  theme_modern() +
  scale_fill_manual(
    values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"),
    name = "Index"
  ) +
  ylab("Point-estimate") +
  xlab("Noise") +
  facet_wrap(~ outcome_type * true_effect, scales = "free")
```

#### Sensitivity to Sample Size

```{r, message=FALSE, warning=FALSE}
df %>%
  select(sample_size, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>%
  gather(estimate, value, -sample_size, -true_effect, -outcome_type) %>%
  mutate(temp = as.factor(cut(sample_size, 10, labels = FALSE))) %>%
  group_by(temp) %>%
  mutate(size_group = round(mean(sample_size))) %>%
  ungroup() %>%
  filter(value < 6) %>%
  ggplot(aes(x = size_group, y = value, fill = estimate, group = interaction(estimate, size_group))) +
  geom_boxplot(outlier.shape = NA) +
  theme_modern() +
  scale_fill_manual(
    values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"),
    name = "Index"
  ) +
  ylab("Point-estimate") +
  xlab("Sample size") +
  facet_wrap(~ outcome_type * true_effect, scales = "free")
```

#### Statistical Modelling

We fitted a (frequentist) logistic regression to statistically test how well the estimates - together with their interaction with noise and sample size - predict the presence or absence of a true effect.
```{r, message=FALSE, warning=FALSE}
df %>%
  select(sample_size, error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>%
  pivot_longer(
    c(-sample_size, -error, -true_effect, -outcome_type),
    names_to = "estimate"
  ) %>%
  glm(true_effect ~ outcome_type / estimate / value, data = ., family = "binomial") %>%
  parameters(df_method = "wald") %>%
  select(Parameter, Coefficient, p) %>%
  filter(
    str_detect(Parameter, "outcome_type"),
    str_detect(Parameter, ":value")
  ) %>%
  arrange(desc(Coefficient)) %>%
  knitr::kable(digits = 2)
```

This suggests that, in order to delineate between the presence and the absence of an effect, and compared to the frequentist's beta coefficient:

- For linear models, the **Mean** was the best predictor, closely followed by the **Median**, the **MAP** and the frequentist **Coefficient**.
- For logistic models, the **MAP** was the best predictor, followed by the **Median**, the **Mean** and, behind, the frequentist **Coefficient**.

Overall, the **median** appears to be a safe choice, maintaining high performance across different types of models.

## Experiment 2: Relationship with Sampling Characteristics

### Methods

We will be carrying out another set of simulations, aimed at modulating the following characteristics:

- **Model type**: linear or logistic.
- **"True" effect** (original regression coefficient from which data is drawn): can be 1 or 0 (no effect).
- **draws**: from 10 to 5000 by steps of 5 (1000 iterations).
- **warmup**: ratio of warmup iterations, from 1/10 to 9/10 by steps of 0.1 (9 iterations).

We generated 3 datasets for each combination of these characteristics, resulting in a total of `2 * 2 * 8 * 40 * 9 * 3 = 34560` Bayesian and frequentist models. The code used for generation is available [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it usually takes several days or weeks to complete).
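For illustration only, a single iteration of such a simulation might look like the following sketch. This is a simplified stand-in for the actual generation code in the `circus` repository linked above; the function name and default values are assumptions of ours, not the original authors' code.

```{r, eval=FALSE}
library(rstanarm)
library(bayestestR)

# Sketch of one simulation iteration: draw noisy data from a known "true"
# effect, fit a Bayesian model with the varied sampling settings, and
# record the point-estimates of the slope.
simulate_once <- function(n = 50, true_effect = 1, error = 1,
                          draws = 1000, warmup_ratio = 0.2) {
  x <- rnorm(n)
  y <- true_effect * x + rnorm(n, sd = error)
  model <- stan_glm(y ~ x,
    data = data.frame(x = x, y = y),
    iter = draws, warmup = round(warmup_ratio * draws),
    chains = 1, refresh = 0
  )
  point_estimate(model, centrality = "all")
}
```

The real study additionally stores the frequentist coefficient from a matching `lm()`/`glm()` fit for comparison.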
```{r message=FALSE, warning=FALSE}
df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study2.csv")
```

### Results

#### Sensitivity to number of iterations

```{r, message=FALSE, warning=FALSE}
df %>%
  select(iterations, true_effect, outcome_type, beta, Median, Mean, MAP) %>%
  gather(estimate, value, -iterations, -true_effect, -outcome_type) %>%
  mutate(temp = as.factor(cut(iterations, 5, labels = FALSE))) %>%
  group_by(temp) %>%
  mutate(iterations_group = round(mean(iterations), 1)) %>%
  ungroup() %>%
  filter(value < 6) %>%
  ggplot(aes(x = iterations_group, y = value, fill = estimate, group = interaction(estimate, iterations_group))) +
  geom_boxplot(outlier.shape = NA) +
  theme_classic() +
  scale_fill_manual(
    values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"),
    name = "Index"
  ) +
  ylab("Point-estimate of the true value 0\n") +
  xlab("\nNumber of Iterations") +
  facet_wrap(~ outcome_type * true_effect, scales = "free")
```

#### Sensitivity to warmup ratio

```{r, message=FALSE, warning=FALSE}
df %>%
  mutate(warmup = warmup / iterations) %>%
  select(warmup, true_effect, outcome_type, beta, Median, Mean, MAP) %>%
  gather(estimate, value, -warmup, -true_effect, -outcome_type) %>%
  mutate(temp = as.factor(cut(warmup, 3, labels = FALSE))) %>%
  group_by(temp) %>%
  mutate(warmup_group = round(mean(warmup), 1)) %>%
  ungroup() %>%
  filter(value < 6) %>%
  ggplot(aes(x = warmup_group, y = value, fill = estimate, group = interaction(estimate, warmup_group))) +
  geom_boxplot(outlier.shape = NA) +
  theme_classic() +
  scale_fill_manual(
    values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"),
    name = "Index"
  ) +
  ylab("Point-estimate of the true value 0\n") +
  xlab("\nWarmup Ratio") +
  facet_wrap(~ outcome_type * true_effect, scales = "free")
```

## Discussion

Conclusions can be found in the [guidelines section](https://easystats.github.io/bayestestR/articles/guidelines.html) article.

# Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.
bayestestR/vignettes/bayestestR.Rmd0000644000175000017500000002507514030221266017337 0ustar nileshnilesh
---
title: "Get Started with Bayesian Analysis"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Get Started with Bayesian Analysis}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
knitr::opts_chunk$set(comment = ">")
options(knitr.kable.NA = "")
options(digits = 2)

if (!requireNamespace("rstanarm", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
} else {
  library(rstanarm)
  library(bayestestR)
}
```

## Why use the Bayesian Framework?

The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards **open and honest science**.
Reasons to prefer this approach are:

- **reliability** [@etz2016bayesian]
- **accuracy** (in noisy data and small samples) [@kruschke2012time]
- the possibility of introducing **prior knowledge** into the analysis [@andrews2013prior; @kruschke2012time]
- critically, the **intuitive nature of results** and their **straightforward interpretation** [@kruschke2010believe; @wagenmakers2018bayesian]

In general, the frequentist approach has been associated with a focus on null hypothesis testing, and the misuse of *p*-values has been shown to critically contribute to the reproducibility crisis in social and psychological sciences [@chambers2014instead; @szucs2016empirical]. There is an emerging consensus that the generalization of the Bayesian approach is *one* way of overcoming these issues [@benjamin2018redefine; @etz2016bayesian].

Once we agree that the Bayesian framework is the right way to go, you might wonder what exactly this framework *is*.

**What's all the fuss about?**

## What is the Bayesian Framework?

Adopting the Bayesian framework is more of a shift in paradigm than a change in methodology. Indeed, all the common statistical procedures (*t*-tests, correlations, ANOVAs, regressions, etc.) can be achieved using the Bayesian framework. The key difference is that in the **frequentist framework** (the "classical" approach to statistics, with *p* and *t* values, as well as some weird *degrees of freedom*), **the effects are fixed** (but unknown) and **data are random**. In other words, it assumes that the unknown parameter has a **unique** value that we are trying to estimate/guess using our sample data.

On the other hand, in the **Bayesian framework**, instead of estimating the "true effect", the probability of different effects *given the observed data* is computed, resulting in a **distribution** of possible values for the parameters, called the **posterior distribution**.

The uncertainty in Bayesian inference can be summarized, for instance, by the **median** of the distribution, as well as a range of values of the posterior distribution that includes the 95\% most probable values (the 95\% **credible interval**). *Cum grano salis*, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. To illustrate the difference in interpretation, the Bayesian framework allows us to say *"given the observed data, the effect has 95\% probability of falling within this range"*, while the frequentist (less intuitive) alternative would be *"when repeatedly computing confidence intervals from data of this sort, there is a 95\% probability that the effect falls within a given range"*.

In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (*the posterior*) of an effect that is compatible with the observed data. Thus, an effect can be described by [characterizing its posterior distribution](https://easystats.github.io/bayestestR/articles/guidelines.html) in relation to its centrality (point-estimates) and uncertainty, as well as its existence and significance.

In other words, putting the maths behind it aside for a moment, we can say that:

- The frequentist approach tries to estimate the **real effect**. For instance, the "real" value of the correlation between *x* and *y*.
Hence, the frequentist models return a **point-estimate** (i.e., a **single** value and not a distribution) of the "real" correlation (e.g., $r = 0.42$) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a "parent", usually normal distribution).

- **The Bayesian framework assumes no such thing**. The data are what they are. Based on the observed data (and a **prior** belief about the result), the Bayesian sampling algorithm (**MCMC** sampling is one example) returns a probability distribution (called **the posterior**) of the effect that is compatible with the observed data. For the correlation between *x* and *y*, it will return a **distribution** that says, for example, "the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74 with certain probabilities".

- To characterize the statistical significance of our effects, we do not need *p*-values, or any other such indices. We simply *describe* the posterior distribution of the effect. For example, we can report the median, the [89% Credible Interval](https://easystats.github.io/bayestestR/articles/credible_interval.html) or [other indices](https://easystats.github.io/bayestestR/articles/guidelines.html).

```{r echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg")
```

*Note: Although the very purpose of this package is to advocate for the use of Bayesian statistics, please note that there are serious arguments supporting frequentist indices (see for instance [this thread](https://discourse.datamethods.org/t/language-for-communicating-frequentist-results-about-treatment-effects/934/16)). As always, the world is not black and white (p \< .001).*

**So... how does it work?**

## A simple example

### `bayestestR` installation

You can install `bayestestR` along with the whole [**easystats**](https://github.com/easystats/easystats) suite by running the following:

```{r eval=FALSE, message=FALSE, warning=FALSE}
install.packages("remotes")
remotes::install_github("easystats/easystats")
```

Let's also install and load the [`rstanarm`](https://mc-stan.org/rstanarm/) package, which allows fitting Bayesian models, as well as [`bayestestR`](https://github.com/easystats/bayestestR), to describe them.

```{r message=FALSE, warning=FALSE, eval=FALSE}
install.packages("rstanarm")
library(rstanarm)
```

### Traditional linear regression

Let's start by fitting a simple frequentist linear regression (the `lm()` function stands for *linear model*) between two numeric variables, `Sepal.Length` and `Petal.Length` from the famous [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R.

```{r message=FALSE, warning=FALSE, eval=FALSE}
model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA}
model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)
```

This analysis suggests that there is a statistically **significant** (whatever that means) and **positive** (with a coefficient of `0.41`) linear relationship between the two variables.

Fitting and interpreting frequentist models is so easy that it is obvious why people use them instead of the Bayesian framework... right?
**Not anymore.**

### Bayesian linear regression

```{r message=FALSE, warning=FALSE, eval=FALSE}
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)
posteriors <- describe_posterior(model)
# for a nicer table
print_md(posteriors, digits = 2)
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA}
set.seed(333)
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, refresh = 0)
posteriors <- describe_posterior(model)
# for a nicer table
print_md(posteriors, digits = 2)
```

**That's it!** You just fitted a Bayesian version of the model by simply using the [`stan_glm()`](https://mc-stan.org/rstanarm/reference/stan_glm.html) function instead of `lm()` and described the posterior distributions of the parameters!

The conclusions we draw, for this example, are very similar. The effect (*the median of the effect's posterior distribution*) is about `0.41`, and it can also be considered *significant* in the Bayesian sense (more on that later).

**So, ready to learn more?** Check out the [**next tutorial**](https://easystats.github.io/bayestestR/articles/example1.html)! And, if you want even more, you can check out other articles describing all the functionality the package has to offer!

## References

bayestestR/vignettes/example2.Rmd0000644000175000017500000002736314054322056016736 0ustar nileshnilesh---
title: "2. Confirmation of Bayesian skills"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{2. Confirmation of Bayesian skills}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r , include=FALSE}
if (!requireNamespace("see", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("performance", quietly = TRUE) ||
  !requireNamespace("BayesFactor", quietly = TRUE) ||
  !requireNamespace("rstanarm", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

data(iris)
library(knitr)
library(bayestestR)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)
```

Now that [**describing and understanding posterior distributions**](https://easystats.github.io/bayestestR/articles/example1.html) of linear regressions is not that mysterious to you, we will take one step back and study some simpler models: **correlations** and ***t*-tests**.

But before we do that, let us take a moment to remind ourselves and appreciate the fact that **all basic statistical procedures** such as correlations, *t*-tests, ANOVAs, or chi-square tests **are** linear regressions (we strongly recommend [this excellent demonstration](https://lindeloev.github.io/tests-as-linear/)). Nevertheless, these simple models will provide a good pretext to introduce a few more complex indices, such as the **Bayes factor**.

## Correlations

### Frequentist version

Once again, let us begin with a **frequentist correlation** between two continuous variables, the **width** and the **length** of the sepals of some flowers.
The data is available in `R` as the `iris` dataset (the same that was used in the [previous tutorial](https://easystats.github.io/bayestestR/articles/example1.html)).

We will compute a Pearson's correlation test, store the results in an object called `result`, and then display it:

```{r}
result <- cor.test(iris$Sepal.Width, iris$Sepal.Length)
result
```

As you can see in the output, the test actually compared **two** hypotheses:

- the **null hypothesis** (*h0*; no correlation),
- the **alternative hypothesis** (*h1*; a non-null correlation).

Based on the *p*-value, the null hypothesis cannot be rejected: the correlation between the two variables is **negative but non-significant** ($r = -.12, p > .05$).

### Bayesian correlation

To compute a Bayesian correlation test, we will need the [`BayesFactor`](https://richarddmorey.github.io/BayesFactor/) package (you can install it by running `install.packages("BayesFactor")`). We can then load this package, compute the correlation using the `correlationBF()` function, and store the result.

```{r, results='hide'}
library(BayesFactor)
result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length)
```

Now, let us run our `describe_posterior()` function on that:

```{r }
describe_posterior(result)
```

We see again many things here, but the important index for now is the **median** of the posterior distribution, `-.11`. This is (again) quite close to the frequentist correlation. We could, as previously, describe the [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html), the [**pd**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) or the [**ROPE percentage**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), but we will focus here on another index provided by the Bayesian framework, the **Bayes Factor (BF)**.

### Bayes Factor (BF)

We said previously that a correlation test actually compares two hypotheses, a null (absence of effect) with an alternative one (presence of an effect). The [**Bayes factor (BF)**](https://easystats.github.io/bayestestR/articles/bayes_factors.html) allows the same comparison and determines **under which of these two models the observed data are more probable**: a model with the effect of interest, and a null model without the effect of interest. So, in the context of our correlation example, the null hypothesis would be no correlation between the two variables ($h0: \rho = 0$; where $\rho$ stands for the Bayesian correlation coefficient), while the alternative hypothesis would be that there is a correlation **different** from 0 - positive or negative ($h1: \rho \neq 0$).

We can use `bayesfactor()` to specifically compute the Bayes factor comparing those models:

```{r}
bayesfactor(result)
```

We got a *BF* of `0.51`. What does that mean?

Bayes factors are **continuous measures of *relative* evidence**, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as *the numerator*), and a Bayes factor smaller than 1 giving evidence in favour of the other model (*the denominator*).

> **Yes, you heard that right, evidence in favour of the *null*!**

That's one of the reasons why the Bayesian framework is sometimes considered superior to the frequentist framework. Remember from your stats lessons that the ***p*-value can only be used to reject *h0***, but not *accept* it. With the **Bayes factor**, you can measure **evidence against - and in favour of - the null**.
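To make this concrete before unpacking it in the next paragraphs, here is a minimal numeric sketch: the value `0.51` is the Bayes factor obtained above, and the "reversed" Bayes factor is simply its reciprocal.

```{r}
# A BF below 1 favours the denominator (here, the null model)
bf_10 <- 0.51 # Bayes factor of h1 over h0, as obtained above
bf_01 <- 1 / bf_10 # reversed Bayes factor: evidence for h0 over h1
bf_01 # about 2: the data are about twice as probable under the null
```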
In other words, in the frequentist framework, if the *p*-value is not significant, we can conclude that **evidence for the effect is absent**, but not that there is **evidence for the absence of the effect**. In the Bayesian framework, we can do the latter. This is important since sometimes our hypotheses are about no effect.

BFs representing evidence for the alternative against the null can be reversed using $BF_{01}=1/BF_{10}$ (the *01* and *10* correspond to *h0* against *h1* and *h1* against *h0*, respectively) to provide evidence of the null against the alternative. This improves human readability^[If the effect is really strong, the BF values can be extremely high. So don't be surprised if you see BF values that have been log-transformed to make them more human readable.] in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null).

In our case, `BF = 1/0.51 = 2` indicates that the data are **2 times more probable under the null compared to the alternative hypothesis**, which, though favouring the null, is considered only [anecdotal evidence against the null](https://easystats.github.io/effectsize/reference/interpret_bf.html).

We can thus conclude that there is **anecdotal evidence in favour of an absence of correlation between the two variables ($r_{median} = -0.11$, BF = 0.51)**, which is a much more informative statement than what we can do with frequentist statistics.

**And that's not all!**

### Visualise the Bayes factor

In general, **pie charts are an absolute no-go in data visualisation**, as our brain's perceptual system heavily distorts the information presented in such a way^[An exception would be when the pie slices are well-labeled so that our brain's perception system does not have to do the decoding work.]. Nevertheless, there is one exception: pizza charts. It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise.

```{r echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. From the great blog.", fig.align='center', out.width="80%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg")
```

Such "pizza plots" can be directly created through the [`see`](https://github.com/easystats/see) visualisation companion package for `easystats` (you can install it by running `install.packages("see")`):

```{r }
library(see)

plot(bayesfactor(result)) +
  scale_fill_pizza()
```

So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke?

## *t*-tests

> **"I know that I know nothing, and especially not if *versicolor* and *virginica* differ in terms of their Sepal.Width" - Socrates**.

Time to finally answer this crucial question!

### Versicolor *vs.* virginica

Bayesian *t*-tests can be performed in a very similar way to correlations. We are particularly interested in two levels of the `Species` factor: *versicolor* and *virginica*. We will start by filtering out of `iris` the non-relevant observations corresponding to the *setosa* species, and we will then visualise the observations and the distribution of the `Sepal.Width` variable.
```{r }
library(dplyr)
library(ggplot2)

# Select only two relevant species
data <- iris %>%
  filter(Species != "setosa") %>%
  droplevels()

# Visualise distributions and observations
data %>%
  ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) +
  geom_violindot(fill_dots = "black", size_dots = 1) +
  scale_fill_material() +
  theme_modern()
```

It *seems* (visually) that *virginica* flowers have, on average, a slightly higher width of sepals. Let's assess this difference statistically by using the `ttestBF()` function in the `BayesFactor` package.

### Compute the Bayesian *t*-test

```{r}
result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data)
describe_posterior(result)
```

From the indices, we can say that the difference of `Sepal.Width` between *virginica* and *versicolor* has a probability of **100% of being negative** [*from the pd and the sign of the median*] (median = -0.19, 89% CI [-0.29, -0.092]). The data provides **strong evidence against the null hypothesis** (BF = 18).

Keep that in mind as we will see another way of investigating this question.

## Logistic Model

A hypothesis for which one uses a *t*-test can also be tested using a binomial model (*e.g.*, a **logistic model**). Indeed, it is possible to reformulate the following hypothesis, "*there is an important difference in this variable between the two groups*" with the hypothesis "*this variable is able to discriminate between (or classify) the two groups*". However, these models are much more powerful than a *t*-test.

In the case of the difference of `Sepal.Width` between *virginica* and *versicolor*, the question becomes, *how well can we classify the two species using only* `Sepal.Width`.

### Fit the model

```{r}
library(rstanarm)
model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0)
```

### Visualise the model

Using the [`modelbased`](https://github.com/easystats/modelbased) package.

```{r}
library(modelbased)

vizdata <- estimate_relation(model)

ggplot(vizdata, aes(x = Sepal.Width, y = Predicted)) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high), alpha = 0.5) +
  geom_line() +
  ylab("Probability of being virginica") +
  theme_modern()
```

### Performance and Parameters

Once again, we can extract all indices of interest for the posterior distribution using our old pal `describe_posterior()`.

```{r}
describe_posterior(model, test = c("pd", "ROPE", "BF"))
```

```{r}
library(performance)
model_performance(model)
```

### Visualise the indices

TO DO.

```{r }
library(see)

plot(rope(result))
```

### Diagnostic Indices

About diagnostic indices such as Rhat and ESS.

bayestestR/vignettes/bibliography.bib0000644000175000017500000003340213610210350017663 0ustar nileshnilesh@book{mcelreath2018statistical, title={Statistical rethinking: A Bayesian course with examples in R and Stan}, author={McElreath, Richard}, year={2018}, publisher={Chapman and Hall/CRC} }

@article{wagenmakers2018bayesian, title={Bayesian inference for psychology.
Part I: Theoretical advantages and practical ramifications}, author={Wagenmakers, Eric-Jan and Marsman, Maarten and Jamil, Tahira and Ly, Alexander and Verhagen, Josine and Love, Jonathon and Selker, Ravi and Gronau, Quentin F and {\v{S}}m{\'\i}ra, Martin and Epskamp, Sacha and others}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={35--57}, year={2018}, publisher={Springer} } @article{morey2014simple, title={Simple relation between Bayesian order-restricted and point-null hypothesis tests}, author={Morey, Richard D and Wagenmakers, Eric-Jan}, journal={Statistics \& Probability Letters}, volume={92}, pages={121--124}, year={2014}, publisher={Elsevier} } @misc{morey_2015_blog, title={Multiple Comparisons with BayesFactor, Part 2 – order restrictions}, url={http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html}, author={Morey, Richard D}, year={2015}, month={Jan} } @article{benjamin2018redefine, title={Redefine statistical significance}, author={Benjamin, Daniel J and Berger, James O and Johannesson, Magnus and Nosek, Brian A and Wagenmakers, E-J and Berk, Richard and Bollen, Kenneth A and Brembs, Bj{\"o}rn and Brown, Lawrence and Camerer, Colin and others}, journal={Nature Human Behaviour}, volume={2}, number={1}, pages={6}, year={2018}, publisher={Nature Publishing Group} } @article{dienes2018four, title={Four reasons to prefer Bayesian analyses over significance testing}, author={Dienes, Zoltan and Mclatchie, Neil}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={207--218}, year={2018}, publisher={Springer} } @article{lakens2018equivalence, title={Equivalence testing for psychological research: A tutorial}, author={Lakens, Dani{\"e}l and Scheel, Anne M and Isager, Peder M}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918770963}, year={2018}, publisher={SAGE Publications Sage CA: Los Angeles, CA} } @article{etz2018bayesian, title={Bayesian inference and testing any hypothesis you can specify}, author={Etz, Alexander and Haaf, Julia M and Rouder, Jeffrey N and Vandekerckhove, Joachim}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918773087}, year={2018}, publisher={SAGE Publications Sage CA: Los Angeles, CA} } @article{kruschke2018bayesian, title={The Bayesian New Statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a Bayesian perspective}, author={Kruschke, John K and Liddell, Torrin M}, journal={Psychonomic Bulletin \& Review}, volume={25}, number={1}, pages={178--206}, year={2018}, publisher={Springer} } @article{wagenmakers2017need, title={The need for Bayesian hypothesis testing in psychological science}, author={Wagenmakers, Eric-Jan and Verhagen, Josine and Ly, Alexander and Matzke, Dora and Steingroever, Helen and Rouder, Jeffrey N and Morey, Richard D}, journal={Psychological science under scrutiny: Recent challenges and proposed solutions}, pages={123--138}, year={2017}, publisher={Wiley New York, NY} } @article{gronau2017bayesian, title={A Bayesian model-averaged meta-analysis of the power pose effect with informed and default priors: The case of felt power}, author={Gronau, Quentin F and Van Erp, Sara and Heck, Daniel W and Cesario, Joseph and Jonas, Kai J and Wagenmakers, Eric-Jan}, journal={Comprehensive Results in Social Psychology}, volume={2}, number={1}, pages={123--138}, year={2017}, publisher={Taylor \& Francis} } @article{gronau2017simple, title={A simple method for comparing 
complex models: Bayesian model comparison for hierarchical multinomial processing tree models using warp-III bridge sampling}, author={Gronau, Quentin F and Wagenmakers, Eric-Jan and Heck, Daniel W and Matzke, Dora}, journal={Psychometrika}, pages={1--24}, year={2017}, publisher={Springer} }

@article{piironen2017comparison, title={Comparison of Bayesian predictive methods for model selection}, author={Piironen, Juho and Vehtari, Aki}, journal={Statistics and Computing}, volume={27}, number={3}, pages={711--735}, year={2017}, publisher={Springer} }

@article{mills2017objective, title={Objective Bayesian Precise Hypothesis Testing}, author={Mills, Jeffrey A}, journal={University of Cincinnati [original version: 2007]}, year={2017} }

@article{szucs2016empirical, title={Empirical assessment of published effect sizes and power in the recent cognitive neuroscience and psychology literature}, author={Szucs, Denes and Ioannidis, John PA}, journal={BioRxiv}, pages={071530}, year={2016}, publisher={Cold Spring Harbor Laboratory} }

@article{wagenmakers2016bayesian, title={Bayesian benefits for the pragmatic researcher}, author={Wagenmakers, Eric-Jan and Morey, Richard D and Lee, Michael D}, journal={Current Directions in Psychological Science}, volume={25}, number={3}, pages={169--176}, year={2016}, publisher={Sage Publications Sage CA: Los Angeles, CA} }

@article{ly2016harold, title={Harold Jeffreys’s default Bayes factor hypothesis tests: Explanation, extension, and application in psychology}, author={Ly, Alexander and Verhagen, Josine and Wagenmakers, Eric-Jan}, journal={Journal of Mathematical Psychology}, volume={72}, pages={19--32}, year={2016}, publisher={Elsevier} }

@article{wasserstein2016asa, title={The ASA’s statement on p-values: context, process, and purpose}, author={Wasserstein, Ronald L and Lazar, Nicole A and others}, journal={The American Statistician}, volume={70}, number={2}, pages={129--133}, year={2016} }

@article{etz2016bayesian, title={A Bayesian perspective on the reproducibility project: Psychology}, author={Etz, Alexander and Vandekerckhove, Joachim}, journal={PloS one}, volume={11}, number={2}, pages={e0149794}, year={2016}, publisher={Public Library of Science} }

@article{burrell2016machine, title={How the machine ‘thinks’: Understanding opacity in machine learning algorithms}, author={Burrell, Jenna}, journal={Big Data \& Society}, volume={3}, number={1}, pages={2053951715622512}, year={2016}, publisher={SAGE Publications Sage UK: London, England} }

@article{castelvecchi2016can, title={Can we open the black box of AI?}, author={Castelvecchi, Davide}, journal={Nature News}, volume={538}, number={7623}, pages={20}, year={2016} }

@incollection{cohen2016earth, title={The earth is round (p < .05)}, author={Cohen, Jacob}, booktitle={What if there were no significance tests?}, pages={69--82}, year={2016}, publisher={Routledge} }

@article{maxwell2015psychology, title={Is psychology suffering from a replication crisis?
What does “failure to replicate” really mean?}, author={Maxwell, Scott E and Lau, Michael Y and Howard, George S}, journal={American Psychologist}, volume={70}, number={6}, pages={487}, year={2015}, publisher={American Psychological Association} } @article{lilienfeld2015fifty, doi = {10.3389/fpsyg.2015.01100}, title={Fifty psychological and psychiatric terms to avoid: a list of inaccurate, misleading, misused, ambiguous, and logically confused words and phrases}, author={Lilienfeld, Scott O and Sauvign{\'e}, Katheryn C and Lynn, Steven Jay and Cautin, Robin L and Latzman, Robert D and Waldman, Irwin D}, journal={Frontiers in Psychology}, volume={6}, pages={1100}, year={2015}, publisher={Frontiers} } @misc{mcelreath2014rethinking, title={rethinking: Statistical Rethinking book package. R package version 1.391}, author={McElreath, R}, year={2014} } @book{kruschke2014doing, title={Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan}, author={Kruschke, John}, year={2014}, publisher={Academic Press} } @article{chambers2014instead, title={Instead of 'playing the game' it is time to change the rules: Registered Reports at AIMS Neuroscience and beyond}, author={Chambers, Christopher D and Feredoes, Eva and Muthukumaraswamy, Suresh Daniel and Etchells, Peter}, journal={AIMS Neuroscience}, volume={1}, number={1}, pages={4--17}, year={2014}, publisher={Aims Press} } @article{dienes2014using, title={Using Bayes to get the most out of non-significant results}, author={Dienes, Zoltan}, journal={Frontiers in psychology}, volume={5}, pages={781}, year={2014}, publisher={Frontiers} } @article{jarosz2014odds, title={What are the odds? A practical guide to computing and reporting Bayes factors}, author={Jarosz, Andrew F and Wiley, Jennifer}, journal={The Journal of Problem Solving}, volume={7}, number={1}, pages={2}, year={2014}, publisher={Purdue University Press} } @incollection{mills2014bayesian, title={Bayesian MCMC estimation}, author={Mills, Jeffrey A and Parent, Olivier}, booktitle={Handbook of Regional Science}, pages={1571--1595}, year={2014}, publisher={Springer} } @article{andrews2013prior, title={Prior approval: The growth of Bayesian methods in psychology}, author={Andrews, Mark and Baguley, Thom}, journal={British Journal of Mathematical and Statistical Psychology}, volume={66}, number={1}, pages={1--7}, year={2013}, publisher={Wiley Online Library} } @article{kruschke2012time, title={The time has come: Bayesian methods for data analysis in the organizational sciences}, author={Kruschke, John K and Aguinis, Herman and Joo, Harry}, journal={Organizational Research Methods}, volume={15}, number={4}, pages={722--752}, year={2012}, publisher={Sage Publications Sage CA: Los Angeles, CA} } @inproceedings{snoek2012practical, title={Practical bayesian optimization of machine learning algorithms}, author={Snoek, Jasper and Larochelle, Hugo and Adams, Ryan P}, booktitle={Advances in neural information processing systems}, pages={2951--2959}, year={2012} } @article{wagenmakers2010bayesian, title={Bayesian hypothesis testing for psychologists: A tutorial on the Savage--Dickey method}, author={Wagenmakers, Eric-Jan and Lodewyckx, Tom and Kuriyal, Himanshu and Grasman, Raoul}, journal={Cognitive psychology}, volume={60}, number={3}, pages={158--189}, year={2010}, publisher={Elsevier} } @article{morey2011bayesinterval, title={Bayes factor approaches for testing interval null hypotheses}, author={Morey, Richard D and Rouder, Jeffrey N}, journal={Psychological methods}, volume={16}, number={4}, 
pages={406}, year={2011}, publisher={American Psychological Association} }

@article{clyde2011bayesian, title={Bayesian adaptive sampling for variable selection and model averaging}, author={Clyde, Merlise A and Ghosh, Joyee and Littman, Michael L}, journal={Journal of Computational and Graphical Statistics}, volume={20}, number={1}, pages={80--101}, year={2011}, publisher={Taylor \& Francis} }

@article{kruschke2010believe, title={What to believe: Bayesian methods for data analysis}, author={Kruschke, John K}, journal={Trends in cognitive sciences}, volume={14}, number={7}, pages={293--300}, year={2010}, publisher={Elsevier} }

@article{wagenmakers2007practical, title={A practical solution to the pervasive problems of p values}, author={Wagenmakers, Eric-Jan}, journal={Psychonomic bulletin \& review}, volume={14}, number={5}, pages={779--804}, year={2007}, publisher={Springer} }

@book{jeffreys1998theory, title={The theory of probability}, author={Jeffreys, Harold}, year={1998}, publisher={OUP Oxford} }

@article{kirk1996practical, title={Practical significance: A concept whose time has come}, author={Kirk, Roger E}, journal={Educational and psychological measurement}, volume={56}, number={5}, pages={746--759}, year={1996}, publisher={Sage Publications Sage CA: Thousand Oaks, CA} }

@article{cohen1988statistical, title={Statistical power analysis for the social sciences}, author={Cohen, Jacob}, year={1988}, publisher={Hillsdale, NJ: Erlbaum} }

@article{rouder2012default, title={Default Bayes factors for ANOVA designs}, author={Rouder, Jeffrey N and Morey, Richard D and Speckman, Paul L and Province, Jordan M}, journal={Journal of Mathematical Psychology}, volume={56}, number={5}, pages={356--374}, year={2012}, publisher={Elsevier} }

@article{wagenmakers2018SI, title={The Support Interval}, author={Wagenmakers, Eric-Jan and Gronau, Quentin Frederik and Dablander, Fabian and Etz, Alexander}, year={2018}, publisher={PsyArXiv}, url = {https://psyarxiv.com/zwnxb/}, doi = {10.31234/osf.io/zwnxb} }

@article{rouder2018bayesian, title={Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors}, author={Rouder, Jeffrey N and Haaf, Julia M and Vandekerckhove, Joachim}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={102--113}, year={2018}, publisher={Springer} }

@article{van2019cautionary, title={A cautionary note on estimating effect size}, author={van den Bergh, Don and Haaf, Julia M and Ly, Alexander and Rouder, Jeffrey N and Wagenmakers, Eric-Jan}, year={2019}, publisher={PsyArXiv} }

bayestestR/vignettes/credible_interval.Rmd0000644000175000017500000002335314030767205020674 0ustar nileshnilesh---
title: "Credible Intervals (CI)"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test, ci, credible interval]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Credible Intervals (CI)}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541.
https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)

if (!requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("logspline", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

set.seed(333)
```

# What is a *Credible* Interval?

Credible intervals are an important concept in Bayesian statistics. Their core purpose is to describe and summarise **the uncertainty** related to the unknown parameters you are trying to estimate. In this regard, they could appear quite similar to frequentist **Confidence Intervals**. However, while their goal is similar, **their statistical definition and meaning are very different**. Indeed, while the latter are obtained through a complex algorithm full of rarely-tested assumptions and approximations, credible intervals are fairly straightforward to compute.

As Bayesian inference returns a **distribution** of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95\% credible interval is simply the central portion of the posterior distribution that contains 95\% of the values.

Note how this drastically improves the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say *"given the observed data, the effect has 95\% probability of falling within this range"*, while the less straightforward frequentist alternative (the 95\% **Confidence Interval**) would be *"there is a 95\% probability that when computing a confidence interval from data of this sort, the effect falls within this range"*.

# 89\% vs. 95\% CI

Using 89\% is another popular choice, and used to be the default for a long time ([read here the story of the change](https://github.com/easystats/bayestestR/discussions/250)). How did it start?

Naturally, when it came to choosing the CI level to report by default, **people started using 95\%**, the arbitrary convention used in the **frequentist** world. However, some authors suggested that 95\% might not be the most appropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn [@kruschke2014doing]. The proposition was to use 90\% instead of 95\%. However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary thresholds in the first place, why not use 89\%? Moreover, 89 is the highest **prime number** that does not exceed the already unstable 95\% threshold. What does it have to do with anything? *Nothing*, but it reminds us of the total arbitrariness of these conventions [@mcelreath2018statistical].

Thus, CIs computed with 89\% intervals (`ci = 0.89`) are deemed to be more stable than, for instance, 95\% intervals [@kruschke2014doing]. An effective sample size (ESS; see [here](https://easystats.github.io/bayestestR/reference/diagnostic_posterior.html)) of at least 10,000 is recommended if one wants to compute precise 95\% intervals (Kruschke, 2014, p. 183ff). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., `rstanarm` or `brms`) is only 4,000 (thus, you might want to increase it when fitting your model).

However, 95\% has some [**advantages too**](https://easystats.github.io/blog/posts/bayestestr_95/).
For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation, and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the 95\% CI than for lower ranges such as 89\%), which is a good thing in the context of the reproducibility crisis.

To add to the mess, some other software uses a different default, such as 90\%.

Ultimately, **you as the user should make an informed decision**, based on your needs and goals, and justify your choice.

# Different types of CIs

The reader might notice that `bayestestR` provides **two methods** to compute credible intervals, the **Highest Density Interval (HDI)** (`hdi()`) and the **Equal-tailed Interval (ETI)** (`eti()`). These methods can also be changed via the `method` argument of the `ci()` function. What is the difference? Let's see:

```{r warning=FALSE, message=FALSE}
library(bayestestR)
library(dplyr)
library(ggplot2)

# Generate a normal distribution
posterior <- distribution_normal(1000)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>%
  estimate_density(extend = TRUE) %>%
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)
```

> **These are exactly the same...**

But is it also the case for other types of distributions?

```{r warning=FALSE, message=FALSE}
# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>%
  estimate_density(extend = TRUE) %>%
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)
```

> **The difference is strong with this one.**

Contrary to the **HDI**, for which all points within the interval have a higher probability density than points outside the interval, the **ETI** is **equal-tailed**. This means that a 90\% interval has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile.

In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution.
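The two intervals also behave differently under transformation, as the next paragraph explains. As a minimal numeric sketch (reusing the skewed posterior generated above): quantile-based bounds commute with monotonic transformations such as `exp()`, whereas density-based bounds generally do not.

```{r warning=FALSE, message=FALSE}
# ETI of the transformed distribution = transformed ETI of the original
ci(exp(posterior), method = "ETI")
exp(ci(posterior, method = "ETI")$CI_low)
exp(ci(posterior, method = "ETI")$CI_high)

# HDI of the transformed distribution generally differs from the
# transformed HDI of the original
ci(exp(posterior), method = "HDI")
exp(ci(posterior, method = "HDI")$CI_low)
exp(ci(posterior, method = "HDI")$CI_high)
```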
On the other hand, the ETI range does *not* change when transformations are applied to the distribution (for instance, when transforming log-odds to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. Thus, for instance, if exponentiated credible intervals are required, it is recommended to calculate the ETI.

# The Support Interval

Unlike the HDI and the ETI, which look at the posterior distribution, the **Support Interval (SI)** provides information regarding the change in the credibility of values from the prior to the posterior - in other words, it indicates which values of a parameter have gained support from the observed data by some factor greater than or equal to *k* [@wagenmakers2018SI].

```{r warning=FALSE, message=FALSE}
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)

si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)

ggplot(mapping = aes(x = x, y = y)) +
  theme_classic() +
  # The posterior
  geom_area(fill = "orange", data = estimate_density(posterior, extend = TRUE)) +
  # The prior
  geom_area(
    color = "black", fill = NA, size = 1, linetype = "dashed",
    data = estimate_density(prior, extend = TRUE)
  ) +
  # BF = 1 SI in blue
  geom_vline(xintercept = si_1$CI_low, color = "royalblue", size = 1) +
  geom_vline(xintercept = si_1$CI_high, color = "royalblue", size = 1) +
  # BF = 3 SI in red
  geom_vline(xintercept = si_3$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = si_3$CI_high, color = "red", size = 1)
```

Between the blue lines are values that received *some* support from the data (this is a $BF = 1~SI$), while between the red lines are values that received at least *moderate* support ($BF = 3~SI$) from the data.

From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the Support Interval will yield a Bayes factor smaller than 1/`BF`.

# References

bayestestR/vignettes/guidelines.Rmd0000644000175000017500000002046714023535711017347 0ustar nileshnilesh---
title: "Reporting Guidelines"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  %\VignetteIndexEntry{Reporting Guidelines}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)
```

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

# Reporting Guidelines

## How to describe and report the parameters of a model

A Bayesian analysis returns a posterior distribution for each parameter (or *effect*).
To minimally describe these distributions, we recommend reporting a point-estimate of [centrality](https://en.wikipedia.org/wiki/Central_tendency) as well as information characterizing the estimation uncertainty (the [dispersion](https://en.wikipedia.org/wiki/Statistical_dispersion)). Additionally, one can also report indices of effect existence and/or significance.

Based on the previous [**comparison of point-estimates**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) and [**indices of effect existence**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we can draw the following recommendations.

### **Centrality**

We suggest reporting the [**median**](https://easystats.github.io/bayestestR/reference/point_estimate.html) as an index of centrality, as it is more robust compared to the [mean](https://easystats.github.io/bayestestR/reference/point_estimate.html) or the [MAP estimate](https://easystats.github.io/bayestestR/reference/map_estimate.html). However, in case of a severely skewed posterior distribution, the MAP estimate could be a good alternative.

### **Uncertainty**

The [**95\% or 89\% Credible Intervals (CI)**](https://easystats.github.io/bayestestR/articles/credible_interval.html) are two reasonable ranges to characterize the uncertainty related to the estimation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html) for a discussion about the differences between these two values). We also recommend computing the CIs based on the [HDI](https://easystats.github.io/bayestestR/reference/hdi.html) rather than [quantiles](https://easystats.github.io/bayestestR/reference/ci.html), favouring probable over central values.

Note that a CI based on the quantile (equal-tailed interval) might be more appropriate in case of transformations (for instance when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis)).

### **Existence**

```{r echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg")
```

The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect *existence* and *significance*. The most straightforward index to describe the existence of an effect is the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html), representing the certainty associated with the most probable direction (positive or negative) of the effect. This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics, and independent from the scale of the data.

Moreover, it is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. A **two-sided *p*-value** of respectively `.1`, `.05`, `.01` and `.001` corresponds approximately to a ***pd*** of 95\%, 97.5\%, 99.5\% and 99.95\%.
Thus, for convenience, we suggest the following reference values as interpretation helpers:

- *pd* **\<= 95\%** ~ *p* \> .1: uncertain
- *pd* **\> 95\%** ~ *p* \< .1: possibly existing
- *pd* **\> 97\%**: likely existing
- *pd* **\> 99\%**: probably existing
- *pd* **\> 99.9\%**: certainly existing

### **Significance**

The percentage in **ROPE** is an index of **significance** (in its primary meaning), informing us whether a parameter is related or not to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the **percentage of the full posterior distribution** (the *full* ROPE) instead of a given proportion of CI in the ROPE, which appears to be more sensitive (especially to delineate highly significant effects). Rather than using it as a binary, all-or-nothing decision criterion, as suggested by the original [equivalence test](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#equivalence-test), we recommend using the percentage as a *continuous* index of significance. However, based on [simulation data](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we suggest the following reference values as interpretation helpers:

- **\> 99\%** in ROPE: negligible (we can accept the null hypothesis)
- **\> 97.5\%** in ROPE: probably negligible
- **\<= 97.5\%** \& **\>= 2.5\%** in ROPE: undecided significance
- **\< 2.5\%** in ROPE: probably significant
- **\< 1\%** in ROPE: significant (we can reject the null hypothesis)

*Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see [here](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#sensitivity-to-parameters-scale))*.

### **Template Sentence**

Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be:

> "the effect of *X* has a probability of ***pd*** of being *negative* (Median = ***median***, 89\% CI [***HDIlow***, ***HDIhigh***]) and can be considered as *significant* (***ROPE***\% in ROPE)."

## How to compare different models

Although it can also be used to assess effect existence and significance, the **Bayes factor (BF)** is a versatile index that can be used to directly compare different models (or data generation processes). The [Bayes factor](https://easystats.github.io/bayestestR/articles/bayes_factors.html) is a ratio that informs us by how much more (or less) likely the observed data are under two compared models - usually a model *with* versus a model *without* the effect. Depending on the specifications of the null model (whether it is a point-estimate (e.g., **0**) or an interval), the Bayes factor could be used both in the context of effect existence and significance.

In general, a Bayes factor greater than 1 is taken as evidence in favour of one of the models (in the numerator), and a Bayes factor smaller than 1 is taken as evidence in favour of the other model (in the denominator). Several rules of thumb exist to help the interpretation (see [here](https://easystats.github.io/effectsize/reference/interpret_bf.html)), with **\> 3** being one common threshold to categorize non-anecdotal evidence.

### **Template Sentence**

When reporting Bayes factors (BF), one can use the following sentence:

> "There is *moderate evidence* in favour of an *absence* of effect of *x* (BF = *BF*)."
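To connect these template sentences to code, here is a minimal sketch of how the required quantities can be obtained with `bayestestR`. It uses a simulated posterior and two hypothetical `lm()` models purely for illustration; with a real analysis you would pass your own fitted Bayesian model instead.

```{r eval=FALSE}
library(bayestestR)

# Median, 89% HDI, pd and % in ROPE for the parameter template sentence
# (simulated posterior standing in for a parameter's posterior draws)
posterior <- distribution_normal(1000, mean = -0.3, sd = 0.1)
describe_posterior(posterior,
  centrality = "median", ci = 0.89, ci_method = "hdi",
  test = c("pd", "rope")
)

# Bayes factor comparing a model with vs. without the effect, for the
# model comparison template sentence (BIC approximation for lm() models)
m0 <- lm(Sepal.Length ~ 1, data = iris)
m1 <- lm(Sepal.Length ~ Petal.Length, data = iris)
bayesfactor_models(m1, denominator = m0)
```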
# Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.

bayestestR/vignettes/indicesExistenceComparison.Rmd0000644000175000017500000000367014054321351022532 0ustar nileshnilesh---
title: "In-Depth 2: Comparison of Indices of Effect Existence and Significance"
output:
  rmarkdown::html_vignette:
    toc: false
    toc_depth: 3
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{In-Depth 2: Comparison of Indices of Effect Existence and Significance}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">", dpi=75, out.width = "100%")
options(digits=2)
```

This vignette can be referred to by citing the following:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

# Indices of Effect *Existence* and *Significance* in the Bayesian Framework

A comparison of different Bayesian indices (*pd*, *BFs*, ROPE etc.) is accessible [**here**](https://doi.org/10.3389/fpsyg.2019.02767). But, in case you don't wish to read the full article, the following table summarizes the key takeaways!

```{r, echo=FALSE}
knitr::include_graphics("https://www.frontiersin.org/files/Articles/498833/fpsyg-10-02767-HTML/image_m/fpsyg-10-02767-t003.jpg")
```

# Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.

bayestestR/vignettes/region_of_practical_equivalence.Rmd0000644000175000017500000002421014023526535023563 0ustar nileshnilesh---
title: "Region of Practical Equivalence (ROPE)"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test, rope, equivalence test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Region of Practical Equivalence (ROPE)}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
if (!requireNamespace("rstanarm", quietly = TRUE) ||
  !requireNamespace("see", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)
set.seed(333)
```

# What is the *ROPE?*

Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against "zero".
Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that an effect is present when the probability of it being outside a specific range that can be considered as **"practically no effect"** (*i.e.*, a negligible magnitude) is sufficiently high. This range is called the **region of practical equivalence (ROPE)**.

Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (for a continuous distribution, the probability of differing from any single point is effectively 1). Therefore, the idea underlying the ROPE is to let the user define an area around the null value enclosing values that are **equivalent to the null** value for practical purposes [@kruschke2010believe; @kruschke2012time; @kruschke2014doing].

# Equivalence Test

The ROPE, being a region corresponding to a "null" hypothesis, is used for the **equivalence test**, to test whether a parameter is **significant** (in the sense of *important* enough to be cared about). This test is usually based on the **"HDI+ROPE decision rule"** [@kruschke2014doing; @kruschke2018bayesian] to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (*i.e.*, a ROPE). In other words, it checks the percentage of the Credible Interval (CI) that falls within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted.

# Credible interval in ROPE *vs* full posterior in ROPE

Using the ROPE and the HDI as the Credible Interval, Kruschke (2018) suggests using the percentage of the 95\% HDI that falls within the ROPE as a decision rule. However, as the 89\% HDI [is considered a better choice](https://easystats.github.io/bayestestR/articles/credible_interval.html) [@kruschke2014doing; @mcelreath2014rethinking; @mcelreath2018statistical], `bayestestR` provides by default the percentage of the 89\% HDI that falls within the ROPE.

However, [*simulation studies data*](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). Thus, we recommend that the user considers using the **full ROPE** percentage (by setting `ci = 1`), which will return the portion of the entire posterior distribution in the ROPE.

# What percentage in ROPE to accept or to reject?

If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, *i.e.*, all the most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Otherwise, it is unclear whether the null hypothesis should be accepted or rejected.

If the **full ROPE** is used (*i.e.*, 100\% of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than 2.5\% or greater than 97.5\%. Desirable results are low proportions inside the ROPE (the closer to zero the better).

# How to define the ROPE range?

Kruschke (2018) suggests that the ROPE could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988).
- For **linear models (lm)**, this can be generalised to: $$[-0.1*SD_{y}, 0.1*SD_{y}]$$
- For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized difference through the formula $$\pi/\sqrt{3}$$ (see [the **effectsize** package](https://easystats.github.io/effectsize/articles/convert.html#from-odds-ratios)), resulting in a range of `-0.18` to `0.18`. For other models with binary outcomes, it is strongly recommended to manually specify the rope argument. Currently, the same default as for logistic models is applied.
- For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above).
- For **correlations**, `-0.05, 0.05` is used, *i.e.*, half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb.
- For all other models, `-0.1, 0.1` is used to determine the ROPE limits, but it is strongly advised to specify it manually.

# Sensitivity to parameter's scale

It is important to consider **the unit (*i.e.*, the scale) of the predictors** when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the [`pd`](https://easystats.github.io/bayestestR/articles/probability_of_direction.html)), the percentage in **ROPE** depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself.

For instance, if we consider a simple regression `growth ~ time`, modelling the development of **Wookies babies**, a negligible change (the ROPE) is less than **54 cm**. If our `time` variable is **expressed in days**, we will find that the coefficient (representing the growth **by day**) is about **10 cm** (*the median of the posterior of the coefficient is 10*), which we would consider **negligible**. However, if we decide to express the `time` variable **in years**, the coefficient will be scaled by this transformation (as it will now represent the growth **by year**). The coefficient will now be around **3650 cm** (`10 * 365`), which we would now consider **significant**.

```{r message=FALSE, warning=FALSE, eval=FALSE}
library(rstanarm)
library(bayestestR)
library(see)

data <- iris # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data = data) # Fit model
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"}
library(rstanarm)
library(bayestestR)
library(see)

set.seed(333)
data <- iris # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data = data, refresh = 0)
```

```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"}
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci = 1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope
```

We can see that the *pd* and the percentage in ROPE of the linear relationship between **Sepal.Length** and **Sepal.Width** are about `92.95%` and `15.95%`, respectively, corresponding to an **uncertain** and **not significant** effect. What happens if we scale our predictor?
```{r message=FALSE, warning=FALSE, eval=FALSE}
data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data = data) # Fit model
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"}
set.seed(333)
data$Sepal.Width_scaled <- data$Sepal.Width / 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data = data, refresh = 0)
```

```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"}
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci = 1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope
```

As you can see, by simply dividing the predictor by 100, we **drastically** changed the conclusion related to the **percentage in ROPE** (which became very close to `0`): the effect could now be **interpreted as being significant**. Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (*e.g.*, what coefficient would correspond to a small effect?), and when reporting or reading ROPE results.

# Multicollinearity: Non-independent covariates

When **parameters show strong correlations**, *i.e.*, when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that show only partial overlap with the ROPE region. In the presence of collinearity, the (joint) distributions of these parameters may show an increased or decreased percentage in the ROPE, which means that inferences based on the ROPE are inappropriate [@kruschke2014doing].

The `equivalence_test()` and `rope()` functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection [@piironen2017comparison].

bayestestR/vignettes/probability_of_direction.Rmd

---
title: "Probability of Direction (pd)"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Probability of Direction (pd)}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology, 10, 2767.
[10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

```{r message=FALSE, warning=FALSE, include=FALSE}
if (!requireNamespace("see", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("tidyr", quietly = TRUE) ||
  !requireNamespace("logspline", quietly = TRUE) ||
  !requireNamespace("KernSmooth", quietly = TRUE) ||
  !requireNamespace("GGally", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

library(knitr)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(comment = ">")
options(digits = 2)
set.seed(333)
```

# What is the *pd?*

The **Probability of Direction (pd)** is an index of **effect existence**, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (*i.e.*, is positive or negative).

Beyond its **simplicity of interpretation, understanding and computation**, this index also presents other interesting properties:

- It is **independent of the model**: it is solely based on the posterior distributions and does not require any additional information from the data or the model.
- It is **robust** to the scale of both the response variable and the predictors.
- It is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers unfamiliar with Bayesian statistics.

However, this index is not relevant for assessing the magnitude and importance of an effect (the meaning of "significance"), which is better achieved through other indices such as the [ROPE percentage](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). In fact, indices of significance and existence are totally independent. You can have an effect with a *pd* of **99.99\%**, for which the whole posterior distribution is concentrated within the `[0.0001, 0.0002]` range. In this case, the effect is **positive with a high certainty**, but also **not significant** (*i.e.*, very small).

Indices of effect existence, such as the *pd*, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect's direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance.

# Relationship with the *p*-value

In most cases, it seems that the *pd* has a direct correspondence with the frequentist **one-sided *p*-value** through the formula: $$p_{one-sided} = 1 - p_d$$ Similarly, the **two-sided *p*-value** (the most commonly reported one) is equivalent through the formula: $$p_{two-sided} = 2 \times (1 - p_d)$$ Thus, two-sided *p*-values of **.1**, **.05**, **.01** and **.001** correspond approximately to *pd* values of **95\%**, **97.5\%**, **99.5\%** and **99.95\%**, respectively.
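As a quick sanity check of this correspondence, here is a minimal sketch (the `0.975` value is just an arbitrary example; `bayestestR` also provides the `pd_to_p()` and `p_to_pd()` helpers for this conversion):

```{r eval=FALSE}
library(bayestestR)

pd <- 0.975 # a pd of 97.5%, expressed as a proportion

# Manual conversion, following the formulas above
1 - pd       # one-sided p-value: 0.025
2 * (1 - pd) # two-sided p-value: 0.05

# The same conversion using the package's helper
pd_to_p(pd, direction = "two-sided")
```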
```{r message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'}
library(dplyr)
library(tidyr)
library(ggplot2)
library(see)

read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>%
  mutate(
    effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"),
    p_direction = p_direction * 100
  ) %>%
  ggplot(aes(x = p_direction, y = p_value, color = effect_existence)) +
  geom_point2(alpha = 0.1) +
  geom_segment(aes(x = 95, y = Inf, xend = 95, yend = 0.1), color = "black", linetype = "longdash") +
  geom_segment(aes(x = -Inf, y = 0.1, xend = 95, yend = 0.1), color = "black", linetype = "longdash") +
  geom_segment(aes(x = 97.5, y = Inf, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") +
  geom_segment(aes(x = -Inf, y = 0.05, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") +
  theme_modern() +
  scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits = 2))) +
  scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) +
  scale_color_manual(values = c("Presence of true effect" = "green", "Absence of true effect" = "red")) +
  theme(legend.title = element_blank()) +
  guides(colour = guide_legend(override.aes = list(alpha = 1))) +
  xlab("Probability of Direction (pd)") +
  ylab("Frequentist p-value")
```

> **But if it's like the *p*-value, it must be bad because the *p*-value is bad [*insert reference to the reproducibility crisis*].**

In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the *p*-value is intrinsically bad or wrong. Instead, it is its **misuse**, **misunderstanding** and **misinterpretation** that fuel the problem. For instance, the fact that the **pd** is highly correlated with the *p*-value suggests that the latter is more an index of effect *existence* than *significance* (*i.e.*, "worth of interest"). The Bayesian version, the **pd**, has an intuitive meaning and makes obvious the fact that **all thresholds are arbitrary**. Additionally, the **mathematical and interpretative transparency** of the **pd**, and its reconceptualisation as an index of effect existence, offer valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity to the frequentist *p*-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework.

# Methods of computation

The most **simple and direct** way to compute the **pd** is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws.

The second approach relies on [**density estimation**](https://easystats.github.io/bayestestR/reference/estimate_density.html). It starts by estimating the density function (for which many methods are available), and then computing the [**area under the curve**](https://easystats.github.io/bayestestR/reference/area_under_curve.html) (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered more precise, but it strongly depends on the method used to estimate the density function.
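To make the "direct" method concrete, here is a minimal sketch on simulated posterior draws (the toy numbers are arbitrary):

```{r eval=FALSE}
set.seed(123)
posterior <- rnorm(1000, mean = 0.4, sd = 0.3) # toy posterior draws

# 1) look at the median's sign, 2) select the portion of the posterior
# with that same sign, 3) compute the percentage it represents
if (median(posterior) >= 0) {
  pd_direct <- mean(posterior >= 0)
} else {
  pd_direct <- mean(posterior < 0)
}

pd_direct # compare with p_direction(posterior)
```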
# Methods comparison

Let's compare the 4 available methods, the **direct** method and 3 **density-based** methods differing by their density estimation algorithm (see [`estimate_density`](https://easystats.github.io/bayestestR/reference/estimate_density.html)).

## Correlation

Let's start by testing the proximity and similarity of the results obtained by different methods.

```{r message=FALSE, warning=FALSE, fig.align='center'}
library(bayestestR)
library(logspline)
library(KernSmooth)

# Compute the correlations
data <- data.frame()
for (the_mean in runif(25, 0, 4)) {
  for (the_sd in runif(25, 0.5, 4)) {
    x <- rnorm(100, the_mean, abs(the_sd))
    data <- rbind(
      data,
      data.frame(
        "direct" = pd(x),
        "kernel" = pd(x, method = "kernel"),
        "logspline" = pd(x, method = "logspline"),
        "KernSmooth" = pd(x, method = "KernSmooth")
      )
    )
  }
}
data <- as.data.frame(sapply(data, as.numeric))

# Visualize the correlations
library(ggplot2)
library(GGally)
GGally::ggpairs(data) +
  theme_classic()
```

All methods are highly correlated and give very similar results. That means that the choice of method is not a drastic game changer and cannot be used to tweak the results too much.

## Accuracy

To test the accuracy of each method, we will start by computing the **direct *pd*** from a very dense distribution (with a large number of observations). This will be our baseline, or "true" *pd*. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the *pd* with different methods. The closer this estimate is to the reference one, the better.

```{r message=FALSE, warning=FALSE}
data <- data.frame()
for (i in 1:25) {
  the_mean <- runif(1, 0, 4)
  the_sd <- abs(runif(1, 0.5, 4))
  parent_distribution <- rnorm(100000, the_mean, the_sd)
  true_pd <- pd(parent_distribution)

  for (j in 1:25) {
    sample_size <- round(runif(1, 25, 5000))
    subsample <- sample(parent_distribution, sample_size)
    data <- rbind(
      data,
      data.frame(
        "sample_size" = sample_size,
        "true" = true_pd,
        "direct" = pd(subsample) - true_pd,
        "kernel" = pd(subsample, method = "kernel") - true_pd,
        "logspline" = pd(subsample, method = "logspline") - true_pd,
        "KernSmooth" = pd(subsample, method = "KernSmooth") - true_pd
      )
    )
  }
}
data <- as.data.frame(sapply(data, as.numeric))
```

```{r message=FALSE, warning=FALSE, fig.align='center'}
library(tidyr)
library(dplyr)

data %>%
  tidyr::gather(Method, Distance, -sample_size, -true) %>%
  ggplot(aes(x = sample_size, y = Distance, color = Method, fill = Method)) +
  geom_point(alpha = 0.3, stroke = 0, shape = 16) +
  geom_smooth(alpha = 0.2) +
  geom_hline(yintercept = 0) +
  theme_classic() +
  xlab("\nDistribution Size")
```

The "kernel"-based density method seems to consistently underestimate the *pd*. Interestingly, the "direct" method appears to be the most reliable, even with a small number of posterior draws.

## Can the pd be 100\%?

`p = 0.000` is coined as one of the terms to avoid when reporting results [@lilienfeld2015fifty], even though it is often displayed by statistical software. The rationale is that, for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the `p = 0.000` returned by software is due to approximations related, among other things, to finite machine precision. One could apply this rationale to the *pd*: since all data points have a non-null probability density, the *pd* (a particular portion of the probability density) can *never* be 100\%.
While this is an entirely valid point, people using the *direct* method might argue that their *pd* is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which `pd = 100%` is a valid statement.

bayestestR/vignettes/bayes_factors.Rmd

---
title: "Bayes Factors"
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, bayes factors]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Bayes Factors}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the following:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology, 10, 2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

```{r setup, include=FALSE}
library(knitr)
options(knitr.kable.NA = "", digits = 2)
knitr::opts_chunk$set(
  echo = TRUE,
  comment = ">",
  out.width = "100%",
  message = FALSE,
  warning = FALSE,
  dpi = 150
)

pkgs <- c(
  "rstanarm", "BayesFactor", "emmeans", "logspline",
  "lme4", "ggplot2", "see", "insight",
  "emmeans", "knitr", "effectsize", "bayestestR"
)
if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) {
  knitr::opts_chunk$set(eval = FALSE)
}
set.seed(4)

if (require("ggplot2") && require("see")) {
  theme_set(theme_modern())
}
```

The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about the **Bayes factor**. In short, one school of thought (e.g., the *Amsterdam school*, led by [E. J. Wagenmakers](https://www.bayesianspectacles.org/)) advocates its use and emphasizes its qualities as a statistical index, while another points to its limits and prefers, instead, the precise description of posterior distributions (using [CIs](https://easystats.github.io/bayestestR/reference/hdi.html), [ROPEs](https://easystats.github.io/bayestestR/reference/rope.html), etc.).

The `bayestestR` package does **not** take a side in this debate, and offers tools to carry out analyses irrespective of the school you subscribe to. Instead, it strongly supports the notion of an *informed choice*: **discover the methods, learn about them, understand them, try them, and decide for yourself**.

Having said that, here's an introduction to Bayes factors :)

# The Bayes Factor

**Bayes Factors (BFs) are indices of *relative* evidence of one "model" over another**.

In their role as a hypothesis testing index, they are to the Bayesian framework what a $p$-value is to the **classical/frequentist framework**. In significance-based testing, $p$-values are used to assess how unlikely the observed data are if the **null hypothesis** were true, while in the **Bayesian model selection framework**, Bayes factors assess evidence for different models, each model corresponding to a specific hypothesis.
According to Bayes' theorem, we can update prior probabilities of some model $M$ ($P(M)$) to posterior probabilities ($P(M|D)$) after observing some datum $D$ by accounting for the probability of observing that datum given the model ($P(D|M)$, also known as the *likelihood*):

$$
P(M|D) = \frac{P(D|M)\times P(M)}{P(D)}
$$

Using this equation, we can compare the probability-odds of two models:

$$
\underbrace{\frac{P(M_1|D)}{P(M_2|D)}}_{\text{Posterior Odds}} = \underbrace{\frac{P(D|M_1)}{P(D|M_2)}}_{\text{Likelihood Ratio}} \times \underbrace{\frac{P(M_1)}{P(M_2)}}_{\text{Prior Odds}}
$$

where the *likelihood ratio* (the middle term) is the *Bayes factor* - it is the ***factor*** by which some **prior odds** have been updated, after observing the data, to **posterior odds**.

Thus, Bayes factors can be calculated in two ways:

- As a ratio quantifying **the relative probability of the observed data under each of the two models**. (In some contexts, these probabilities are also called *marginal likelihoods*.)

$$
BF_{12}=\frac{P(D|M_1)}{P(D|M_2)}
$$

- As **the degree of shift in prior beliefs** about the relative credibility of two models (since they can be computed by dividing posterior odds by prior odds).

$$
BF_{12}=\frac{Posterior~Odds_{12}}{Prior~Odds_{12}}
$$

Here we provide functions for computing Bayes factors in two different contexts:

- **testing single parameters (coefficients) within a model**
- **comparing statistical models themselves**

# Testing Models' Parameters with Bayes Factors {#bayesfactor_parameters}

A **Bayes factor for a single parameter** can be used to answer the question:

> "Given the observed data, has the null hypothesis of an absence of an effect become more or less credible?"

```{r deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', out.width="80%"}
knitr::include_graphics("https://github.com/easystats/bayestestR/raw/master/man/figures/deathsticks.jpg")
```

Let's use the Students' (1908) Sleep data set (`data("sleep")`). The data comes from a study in which participants were administered a drug and the researchers assessed the extra hours of sleep that participants slept afterwards. We will try answering the following research question using Bayes factors:

> **Given the observed data, has the hypothesis that the drug (the effect of `group`) has no effect on the number of hours of extra sleep (variable `extra`) become more or less credible?**

```{r sleep_boxplot, echo=FALSE}
library(ggplot2)

ggplot(sleep, aes(x = group, y = extra, fill = group)) +
  geom_boxplot() +
  theme_classic() +
  theme(legend.position = "none")
```

The **boxplot** suggests that the second group has a higher number of hours of extra sleep. *By how much?* Let's fit a simple [Bayesian linear model](https://easystats.github.io/bayestestR/articles/example1.html), with a prior of $b_{group} \sim N(0, 3)$ (i.e.
the prior follows a Gaussian/normal distribution with $mean = 0$ and $SD = 3$), using the `rstanarm` package:

```{r rstanarm_model, eval = FALSE}
set.seed(123)
library(rstanarm)

model <- stan_glm(
  formula = extra ~ group,
  data = sleep,
  prior = normal(0, 3, autoscale = FALSE)
)
```

```{r, echo=FALSE}
model <- stan_glm(
  formula = extra ~ group,
  data = sleep,
  prior = normal(0, 3, autoscale = FALSE),
  refresh = 0
)
```

### Testing against a null-*region*

One way of operationalizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be *practically* equivalent to the null [@kruschke2010believe]. In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. We can then compute the prior probability of the drug's effect falling *within this null-region*, and the prior probability of the drug's effect falling *outside the null-region*, to get our *prior odds*. Say that any effect smaller than an hour of extra sleep is practically equivalent to no effect at all; we would then define our prior odds as:

$$
\frac
{P(b_{drug} \in [-1, 1])}
{P(b_{drug} \notin [-1, 1])}
$$

Given that our prior has a normal distribution centered at 0 hours with a scale (an SD) of 3 hours, our priors would look like this:

```{r, echo=FALSE}
null <- c(-1, 1)
xrange <- c(-10, 10)

x_vals <- seq(xrange[1], xrange[2], length.out = 400)
d_vals <- dnorm(x_vals, sd = 3)
in_null <- null[1] < x_vals & x_vals < null[2]
range_groups <- rep(0, length(x_vals))
range_groups[!in_null & x_vals < 0] <- -1
range_groups[!in_null & x_vals > 0] <- 1

ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) +
  geom_area(color = "black", size = 1) +
  scale_fill_flat(name = "", labels = c("Alternative", "Null")) +
  labs(x = "Drug effect", y = "Density") +
  coord_cartesian(ylim = c(0, 0.45)) +
  theme_modern() +
  theme(legend.position = c(0.2, 0.8))

# compute the prior mass inside the null region,
# using the same SD (3) as the model's prior
pnull <- diff(pnorm(null, sd = 3))
prior_odds <- (1 - pnull) / pnull
```

and the prior odds would be ~2.8.

By looking at the posterior distribution, we can now compute the posterior probability of the drug's effect falling *within the null-region*, and the posterior probability of the drug's effect falling *outside the null-region*, to get our *posterior odds*:

$$
\frac
{P(b_{drug} \in [-1,1] | Data)}
{P(b_{drug} \notin [-1,1] | Data)}
$$

```{r rstanarm_fit, echo=FALSE}
library(bayestestR)

model_prior <- unupdate(model)
posterior <- insight::get_parameters(model)$group2
prior <- insight::get_parameters(model_prior)$group2

f_post <- logspline::logspline(posterior)

d_vals_post <- logspline::dlogspline(x_vals, f_post)

ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) +
  geom_area(color = "black", size = 1) +
  scale_fill_flat(name = "", labels = c("Alternative", "Null")) +
  labs(x = "Drug effect", y = "Density") +
  coord_cartesian(ylim = c(0, 0.45)) +
  theme_modern() +
  theme(legend.position = c(0.2, 0.8))

My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1), prior = model_prior)
BF <- My_first_BF$BF[2]
post_odds <- prior_odds * BF

med_post <- point_estimate(posterior)$Median
```

We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are ~2.5, which seems to favor **the effect being non-null**. **But**, does this mean the data support the alternative over the null? Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here!
Let's compute the Bayes factor as the change from the prior odds to the posterior odds: $BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9$! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than they do for the drug having some clinically significant effect. Thus, although the center of the distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has *overall* shifted closer to the null interval, making the values in the null interval more probable! [see *Non-overlapping Hypotheses* in @morey2011bayesinterval]

All of this can be achieved with the function `bayesfactor_parameters()`, which computes a Bayes factor for each of the model's parameters:

```{r, eval=FALSE}
My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1))
My_first_BF
```

```{r, echo=FALSE}
print(My_first_BF)
```

We can also plot using the `see` package:

```{r}
library(see)
plot(My_first_BF)
```

Note that **interpretation guides** for Bayes factors can be found in the `effectsize` package:

```{r}
effectsize::interpret_bf(exp(My_first_BF$log_BF[2]), include_value = TRUE)
```

### Testing against the *point*-null (0)

> **What if we don't know what region would be practically equivalent to 0?**

Or if we just want the null to be exactly zero? Not a problem - as the width of the null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.^[Note that as the width of the null interval shrinks to zero, the prior probability and posterior probability of the alternative tend towards 1.00.] This ratio is called the **Savage-Dickey ratio**, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null:

> "[...] the Bayes factor for $H_0$ versus $H_1$ could be obtained by analytically integrating out the model parameter $\theta$. However, the Bayes factor may likewise be obtained by only considering $H_1$, and dividing the height of the posterior for $\theta$ by the height of the prior for $\theta$, at the point of interest." [@wagenmakers2010bayesian]

```{r, eval=FALSE}
My_second_BF <- bayesfactor_parameters(model, null = 0)
My_second_BF
```

```{r, echo=FALSE}
My_second_BF <- bayesfactor_parameters(
  data.frame(group2 = posterior),
  data.frame(group2 = prior),
  null = 0
)

print(My_second_BF)
```

```{r}
plot(My_second_BF)
```

### Directional hypotheses

We can also compute Bayes factors for directional hypotheses ("one sided"), if we have a prior hypothesis about the direction of the effect. This can be done by setting an *order restriction* on the prior distribution of the alternative (which results in an order restriction on the posterior distribution) [@morey2014simple].
For example, if we have a prior hypothesis that *the drug has a positive effect on the number of sleep hours*, the alternative will be restricted to the region to the right of the null (point or interval):

```{r savagedickey_one_sided, eval=FALSE}
test_group2_right <- bayesfactor_parameters(model, direction = ">")
test_group2_right
```

```{r prior_n_post_plot_one_sided, echo=FALSE}
test_group2_right <- bayesfactor_parameters(
  data.frame(group2 = posterior),
  data.frame(group2 = prior),
  null = 0,
  direction = ">"
)
BF <- test_group2_right$BF
print(test_group2_right)
```

```{r}
plot(test_group2_right)
```

As we can see, given that we have an *a priori* assumption about the direction of the effect (that the effect is positive), **the presence of an effect is 2.8 times more likely than the absence of an effect**, given the observed data (or: the data are 2.8 times more probable under $H_1$ than $H_0$). This indicates that, given the observed data and our a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of 2.8 is still considered quite [weak evidence](https://easystats.github.io/effectsize/reference/interpret_bf.html)).

Thanks to the flexibility of the Bayesian framework, it is also possible to compute a Bayes factor for **dividing** hypotheses - that is, for a null and alternative that are *complementary*, opposing one-sided hypotheses [@morey2014simple].

For example, above we compared an alternative of $H_A$: *the drug has a positive effect* to the null $H_0$: *the drug has no effect*. But we can also compare instead the same alternative to its *complementary* hypothesis: $H_{-A}$: *the drug has a negative effect*.

```{r inteval_div, eval=FALSE}
test_group2_dividing <- bayesfactor_parameters(model, null = c(-Inf, 0))
test_group2_dividing
```

```{r inteval_div2, echo=FALSE}
test_group2_dividing <- bayesfactor_parameters(
  data.frame(group2 = posterior),
  data.frame(group2 = prior),
  null = c(-Inf, 0)
)

print(test_group2_dividing)
```

```{r}
plot(test_group2_dividing)
```

We can see that this test produces even stronger (more conclusive) evidence than the one-sided vs. point-null test! And indeed, as a rule of thumb, the more specific the two hypotheses are, and the more distinct they are from one another, the more *power* our Bayes factor has! ^[For more, see [this talk by Richard D. Morey, minute 48](https://philstatwars.files.wordpress.com/2020/09/richard_presentation.mp4)]

Thanks to the transitivity of Bayes factors, we can also use `bayesfactor_parameters()` to compare even more types of hypotheses, with some trickery. For example:

$$
\underbrace{BF_{0<b\ \text{vs.}\ b=0}}_{\text{the one-sided test above}} = \underbrace{BF_{0<b\ \text{vs.}\ b<0}}_{\text{the dividing test above}} \times \underbrace{BF_{b<0\ \text{vs.}\ b=0}}_{\text{a test we never ran}}
$$

so that, by rearranging, we can obtain $BF_{b<0\ \text{vs.}\ b=0}$ from the two Bayes factors we did compute.

### Support intervals {#si}

So far, we have tested specific hypotheses; we can also ask:

> **Upon observing the data, the credibility of which of the parameter's values has increased (or decreased)?**

For example, we've seen that the point null has become somewhat less credible after observing the data, but we might also ask which values have **gained** credibility given the observed data. The resulting range of values is called **the support interval**, as it indicates which values are supported by the data [@wagenmakers2018SI]. We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities.
In `bayestestR`, this can be achieved with the `si()` function: ```{r} my_first_si <- si( posterior = data.frame(group2 = posterior), prior = data.frame(group2 = prior), BF = 1 ) print(my_first_si) ``` The argument `BF = 1` indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all). Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased): ```{r} plot(my_first_si) ``` We can also see the this support interval (just barely) excludes the point null (0) - whose credibility we've already seen has decreased by the observed data. This emphasizes the relationship between the support interval and the Bayes factor: > "The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level $\alpha$. For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against." [@wagenmakers2018SI] Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent: - A $BF = 1$ contains values whose credibility has merely not decreased by observing the data. - A $BF > 1$ contains values who received more impressive support from the data. - A $BF < 1$ contains values whose credibility has *not* been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than $1/BF$ in support of the alternative. # Comparing Models using Bayes Factors {#bayesfactor_models} Bayes factors can also be used to compare statistical **models**. In this statistical context, they answer the following question: > **Under which model are the observed data more probable?** In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the **relative** evidence for one model over the other. Let's use Bayes factors for model comparison to find a model that best describes the length of an iris' sepal using the `iris` data set. ### For Bayesian models (`brms` and `rstanarm`) **Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:** - `brmsfit` models **must** have been fitted with `save_pars = save_pars(all = TRUE)` - `stanreg` models **must** have been fitted with a defined `diagnostic_file`. 
Let's first fit 5 Bayesian regressions with `brms` to predict `Sepal.Length`:

```{r brms_disp, eval = FALSE}
library(brms)

# intercept only model
m0 <- brm(Sepal.Length ~ 1,
  data = iris,
  prior =
    set_prior("student_t(3, 6, 6)", class = "Intercept") +
    set_prior("student_t(3, 0, 6)", class = "sigma"),
  save_pars = save_pars(all = TRUE), backend = "rstan"
)

# Petal.Length only
m1 <- brm(Sepal.Length ~ Petal.Length,
  data = iris,
  prior =
    set_prior("student_t(3, 6, 6)", class = "Intercept") +
    set_prior("student_t(3, 0, 6)", class = "sigma") +
    set_prior("normal(0, 1)", coef = "Petal.Length"),
  save_pars = save_pars(all = TRUE)
)

# Species only
m2 <- brm(Sepal.Length ~ Species,
  data = iris,
  prior =
    set_prior("student_t(3, 6, 6)", class = "Intercept") +
    set_prior("student_t(3, 0, 6)", class = "sigma") +
    set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")),
  save_pars = save_pars(all = TRUE)
)

# Species + Petal.Length model
m3 <- brm(Sepal.Length ~ Species + Petal.Length,
  data = iris,
  prior =
    set_prior("student_t(3, 6, 6)", class = "Intercept") +
    set_prior("student_t(3, 0, 6)", class = "sigma") +
    set_prior("normal(0, 1)", coef = "Petal.Length") +
    set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")),
  save_pars = save_pars(all = TRUE)
)

# full interactive model
m4 <- brm(Sepal.Length ~ Species * Petal.Length,
  data = iris,
  prior =
    set_prior("student_t(3, 6, 6)", class = "Intercept") +
    set_prior("student_t(3, 0, 6)", class = "sigma") +
    set_prior("normal(0, 1)", coef = "Petal.Length") +
    set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")) +
    set_prior("normal(0, 2)", coef = c("Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length")),
  save_pars = save_pars(all = TRUE)
)
```

We can now compare these models with the `bayesfactor_models()` function, using the `denominator` argument to specify the model against which the rest of the models will be compared (in this case, the intercept-only model):

```{r brms_models_disp, eval = FALSE}
library(bayestestR)
comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0)
comparison
```

```{r, echo = FALSE}
comparison <- structure(
  list(
    Model = c("Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1"),
    log_BF = c(101.556419030653, 64.2903334815192, 122.864721399001, 119.712908243647, 0)
  ),
  class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"),
  row.names = c("m1", "m2", "m3", "m4", "m0"),
  denominator = 5L,
  BF_method = "marginal likelihoods (bridgesampling)",
  unsupported_models = FALSE
)

print(comparison)
```

We can see that the `Species + Petal.Length` model is the best model - with $BF=2\times 10^{53}$ compared to the null (intercept only).

Due to the transitive property of Bayes factors, we can easily change the reference model to the full `Species * Petal.Length` model:

```{r update_models1}
update(comparison, reference = 4)
```

As we can see, the `Species + Petal.Length` model is also favored compared to the `Species * Petal.Length` model, though by several orders of magnitude less - it is only supported 23.38 times more!

We can also change the reference model to the `Species` model:

```{r update_models2}
update(comparison, reference = 2)
```

Notice that, in the Bayesian framework, the compared models *do not* need to be nested models, as happened here when we compared the `Petal.Length`-only model to the `Species`-only model (something that cannot be done in the frequentist framework, where compared models must be nested in one another).
We can also get a matrix of Bayes factors of all the pairwise model comparisons:

```{r}
as.matrix(comparison)
```

**NOTE:** In order to correctly and precisely estimate Bayes Factors, you always need the 4 P's: **P**roper **P**riors ^[[Robert, 2016](https://doi.org/10.1016/j.jmp.2015.08.002); [Kass & Raftery, 1993](https://doi.org/10.1080/01621459.1995.10476572); [Fernández, Ley, & Steel, 2001](https://doi.org/10.1016/S0304-4076(00)00076-2)], and a **P**lentiful **P**osterior ^[[Gronau, Singmann, & Wagenmakers, 2017](https://arxiv.org/abs/1710.08162)].

### For Frequentist models via the BIC approximation

It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models [@wagenmakers2007practical].

Let's try it out on some **linear mixed-effects models**:

```{r lme4_models}
library(lme4)

# define models with increasing complexity
m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris)
m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris)
m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris)
m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris)
m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris)

# model comparison
bayesfactor_models(m1, m2, m3, m4, denominator = m0)
```

### Order restricted models {#bayesfactor_restricted}

As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. For example, consider the following model, in which we predict the length of an iris' sepal from the length of its petal, as well as from its species, with priors:

- $b_{petal} \sim N(0,2)$
- $b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)$

```{r}
iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length,
  data = iris,
  prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE),
  refresh = 0
)
```

These priors are **unrestricted** - that is, **all values** between $-\infty$ and $\infty$ of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Consequently, *a priori*, the parameters relating to the iris species can have any ordering, such that *a priori* setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa!

Does it make sense to let our priors cover all of these possibilities? That depends on our *prior* knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be *negatively* associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica.

These priors can be formulated as **restricted** priors [@morey_2015_blog; @morey2011bayesinterval]:

1. The novice botanist: $b_{petal} > 0$
2. The expert botanist: $b_{versicolors} > 0\ \&\ b_{virginica} > 0$

By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. This can be achieved with `bayesfactor_restricted()`, which computes a Bayes factor for these restricted models vs. the unrestricted model.
Let's first specify these restrictions as logical conditions:

```{r}
botanist_hypotheses <- c(
  "Petal.Length > 0",
  "(Speciesversicolor > 0) & (Speciesvirginica > 0)"
)
```

Let's test these hypotheses:

```{r}
model_prior <- unupdate(iris_model)

botanist_BFs <- bayesfactor_restricted(
  posterior = iris_model,
  prior = model_prior,
  hypothesis = botanist_hypotheses
)

print(botanist_BFs)
```

We can see that the novice botanist's hypothesis gets a Bayes factor of ~2, indicating that the data provide twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length as for a model with no such restriction.

What about our expert botanist? He seems to have failed miserably, with a BF favoring the *unrestricted* model many, many times over. How is this possible? It seems that when *controlling for petal length*, versicolor and virginica actually have shorter sepals!

```{r plot_iris, echo=FALSE}
ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) +
  geom_point() +
  scale_color_flat() +
  theme(legend.position = c(0.2, 0.8))
```

Note that these Bayes factors compare the restricted model to the unrestricted model. If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so:

$$
BF_{\text{restricted vs. NULL}} = \frac
{BF_{\text{restricted vs. un-restricted}}}
{BF_{\text{NULL vs. un-restricted}}}
$$

**Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (*a priori*) hypotheses, and should not be used for any post hoc comparisons [@morey_2015_blog].**

**NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* appendix below.

# Bayesian Model Averaging

In the previous section, we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider, or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases, we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models.

### Inclusion Bayes factors {#bayesfactor_inclusion}

Inclusion Bayes factors answer the question:

> **Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?**

In other words, on average, are models with predictor $X$ more likely to have produced the observed data than models without predictor $X$?^[A model without predictor $X$ can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.]

Since each model has a prior probability, it is possible to sum the prior probabilities of all models that include a predictor of interest (the *prior inclusion probability*), and of all models that do not include that predictor (the *prior exclusion probability*). After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models' probabilities to obtain the *posterior inclusion probability* and the *posterior exclusion probability*. Once again, the change from the prior inclusion odds to the posterior inclusion odds is the **Inclusion Bayes factor** ["$BF_{Inclusion}$"; @clyde2011bayesian].
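To illustrate the arithmetic, here is a minimal sketch with made-up model probabilities, assuming three hypothetical models of which the last two include the predictor $X$:

```{r eval=FALSE}
# hypothetical prior and posterior model probabilities
prior_prob <- c(null = 1 / 3, with_X = 1 / 3, with_X_and_Z = 1 / 3)
post_prob <- c(null = 0.1, with_X = 0.6, with_X_and_Z = 0.3)
includes_X <- c(FALSE, TRUE, TRUE)

# prior and posterior inclusion odds for X
prior_incl_odds <- sum(prior_prob[includes_X]) / sum(prior_prob[!includes_X]) # 2
post_incl_odds <- sum(post_prob[includes_X]) / sum(post_prob[!includes_X]) # 9

# the inclusion Bayes factor is the change in inclusion odds
post_incl_odds / prior_incl_odds # 4.5
```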
Let's use the `brms` example from above:

```{r inclusion_brms}
bayesfactor_inclusion(comparison)
```

If we examine the interaction term's inclusion Bayes factor, we can see that across all 5 models, a model with the term is *on average* (1/0.171) 5.84 times less supported than a model without the term.

Note that `Species`, a factor represented in the model with several parameters, gets a *single* Bayes factor - inclusion Bayes factors are given **per predictor**!

We can also compare only matched models - such that averaging is done only across models that (1) do not include any interactions with the predictor of interest; and (2) for interaction predictors, averaging is done only across models that contain the main effects of which the interaction predictor is comprised (see an explanation of why you might want to do this [here](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp)).

```{r inclusion_brms2}
bayesfactor_inclusion(comparison, match_models = TRUE)
```

### Comparison with JASP

`bayesfactor_inclusion()` is meant to provide Bayes Factors per predictor, similar to JASP's *Effects* option. Let's compare the two:

1. **Across all models**:

```{r JASP_all}
library(BayesFactor)
data(ToothGrowth)
ToothGrowth$dose <- as.factor(ToothGrowth$dose)

BF_ToothGrowth <- anovaBF(len ~ dose * supp, ToothGrowth, progress = FALSE)

bayesfactor_inclusion(BF_ToothGrowth)
```

```{r JASP_all_fig, echo=FALSE}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.jpg")
```

2. **Across matched models**:

```{r JASP_matched}
bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE)
```

```{r JASP_matched_fig, echo=FALSE}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.jpg")
```

3. **With Nuisance Effects**:

We'll add `dose` to the null model in JASP, and do the same in `R`:

```{r JASP_Nuisance}
BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4] / BF_ToothGrowth[2]
# OR:
# update(bayesfactor_models(BF_ToothGrowth),
#        subset = c(4, 5),
#        reference = 3)
BF_ToothGrowth_against_dose

bayesfactor_inclusion(BF_ToothGrowth_against_dose)
```

```{r JASP_Nuisance_fig, echo=FALSE}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.jpg")
```

## Averaging posteriors {#weighted_posteriors}

Similar to how we can average evidence for a predictor across models, we can also average the **posterior estimate** across models. This is useful in situations where Bayes factors seem to support a null effect, yet the *HDI* for the alternative excludes the null value (also see `si()` described above).

For example, looking at the *Motor Trend Car Road Tests* data (`data(mtcars)`), we would naturally predict miles/gallon (`mpg`) from transmission type (`am`) and weight (`wt`), but what about the number of carburetors (`carb`)? Is this a good predictor?

We can determine this by comparing the following models:

```{r}
mod <- stan_glm(mpg ~ wt + am,
  data = mtcars,
  prior = normal(0, c(10, 10), autoscale = FALSE),
  diagnostic_file = file.path(tempdir(), "df1.csv"),
  refresh = 0
)

mod_carb <- stan_glm(mpg ~ wt + am + carb,
  data = mtcars,
  prior = normal(0, c(10, 10, 20), autoscale = FALSE),
  diagnostic_file = file.path(tempdir(), "df0.csv"),
  refresh = 0
)

BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE)
BF_carb
```

It seems that the model without `carb` as a predictor is $1/BF=1.2$ times more likely than the model *with* `carb` as a predictor.
We might then assume that in the latter model, the `HDI` will include the point-null value of 0 effect, to also indicate the credibility of the null in the posterior. However, this is not the case:

```{r}
hdi(mod_carb, ci = .95)
```

How can this be? By estimating the HDI of the effect for `carb` in the full model, we are acting under the assumption that this model is correct. However, as we've just seen, both models are practically tied. If this is the case, **why limit our estimation of the effect to just one model?** [@van2019cautionary].

Using Bayesian Model Averaging, we can combine the posterior samples from several models, weighted by the models' marginal likelihoods (done via the `bayesfactor_models()` function). If some parameter is part of some of the models but is missing from others, it is assumed to be fixed at 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI.

In `bayestestR`, we can do this with the `weighted_posteriors()` function:

```{r}
BMA_draws <- weighted_posteriors(mod, mod_carb)

BMA_hdi <- hdi(BMA_draws, ci = .95)
BMA_hdi

plot(BMA_hdi)
```

We can see that across both models under consideration, the posterior of the `carb` effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense, as both models were almost equally supported by the data. We can also see that, across both models, **the HDI now does contain 0**. Thus, we have resolved the conflict between the Bayes factor and the HDI [@rouder2018bayesian]!

**Note**: Parameters might play different roles across different models. For example, the parameter `A` plays a different role in the model `Y ~ A + B` (where it is a *main* effect) than it does in the model `Y ~ A + B + A:B` (where it is a *simple* effect). Centering predictors (mean-subtracting continuous variables, and using effects coding via `contr.sum` or orthonormal coding via [`contr.orthonorm`](https://easystats.github.io/bayestestR/reference/contr.orthonorm.html) for factors) can in many cases reduce this issue.

# Appendices

## Testing contrasts (with `emmeans` / `modelbased`)

Besides testing model parameters, `bayesfactor_parameters()` can be used to test any estimate based on the prior and posterior distributions of that estimate. One way to achieve this is with a mix of `bayesfactor_parameters()` + [**`emmeans`**](https://cran.r-project.org/package=emmeans) to [test Bayesian contrasts](https://easystats.github.io/blog/posts/bayestestr_emmeans/).

For example, in the `sleep` example from above, we can estimate the group means and the difference between them:

```{r, echo=FALSE}
set.seed(1)
```

```{r}
library(emmeans)

(group_diff <- emmeans(model, pairwise ~ group))

# pass the original model via prior
bayesfactor_parameters(group_diff, prior = model)
```

That is strong evidence for the mean of group 1 being 0, and for the mean of group 2 not being 0, but hardly any evidence for the difference between them being not 0. Conflict? Uncertainty? That is the Bayesian way!
We can also use the `easystats` [**`modelbased`**](https://cran.r-project.org/package=modelbased) package to compute Bayes factors for contrasts:

```{r, echo=FALSE}
set.seed(1)
```

```{r, eval=FALSE}
library(modelbased)

estimate_contrasts(model, test = "bf", bf_prior = model)
```

**NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* section below.

## Specifying correct priors for factors {#contr_bayes}

This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.orthonorm`) [as outlined in @rouder2012default, section 7.2]. **Special care should be taken when working with factors with 3 or more levels**.

### Contrasts (and marginal means)

The *effects* factor coding commonly used in factorial analysis carries a hidden bias when it is applied to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all *a priori* differences to have the same distribution, but...

For our example, we will test all ***prior*** pairwise differences between the 3 species in the `iris` dataset.

```{r}
df <- iris
contrasts(df$Species) <- contr.sum

fit_sum <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  prior_PD = TRUE, # sample priors
  family = gaussian(),
  refresh = 0
)

(pairs_sum <- pairs(emmeans(fit_sum, ~Species)))

ggplot(stack(insight::get_parameters(pairs_sum)), aes(x = values, fill = ind)) +
  geom_density(size = 1) +
  facet_grid(ind ~ .) +
  labs(x = "prior difference values") +
  theme(legend.position = "none")
```

Notice that, though the prior estimate for all 3 pairwise contrasts is ~0, the scale (the width of the HDI) is much narrower for the prior of the `setosa - versicolor` contrast!

**What happened???**

This is caused by an inherent bias in the priors introduced by the *effects* coding (it's even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effects' parameters). **And since it affects the priors, this bias will also bias the Bayes factors, over- or understating evidence for some contrasts over others!**

The solution is to use *orthonormal* factor coding, à la `contr.orthonorm`. You can either specify this factor coding per factor:

```{r}
contrasts(df$Species) <- contr.orthonorm
```

Or you can set it globally:

```{r, eval=FALSE}
options(contrasts = c("contr.orthonorm", "contr.poly"))
```

Let's again estimate the ***prior*** differences:

```{r}
fit_bayes <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  prior_PD = TRUE, # sample priors
  family = gaussian(),
  refresh = 0
)

(pairs_bayes <- pairs(emmeans(fit_bayes, ~Species)))

ggplot(stack(insight::get_parameters(pairs_bayes)), aes(x = values, fill = ind)) +
  geom_density(size = 1) +
  facet_grid(ind ~ .) +
  labs(x = "prior difference values") +
  theme(legend.position = "none")
```

We can see that using this coding scheme, we have equal priors on all pairwise contrasts.

There are other solutions to this problem of priors. You can read about them in [Solomon Kurz's blog post](https://solomonkurz.netlify.app/post/2020-12-09-multilevel-models-and-the-index-variable-approach/).

### Order restrictions

This bias also affects order restrictions involving 3 or more levels.
For example, if we want to test an order restriction among A, B, and C, the *a priori* probability of obtaining the order A > C > B is 1/6 (reach back to *intro to stats* year 1), but...

For our example, we will be interested in the following order restrictions in the `iris` dataset (each line is a separate restriction):

```{r}
hyp <- c(
  # comparing 2 levels
  "setosa < versicolor",
  "setosa < virginica",
  "versicolor < virginica",

  # comparing 3 (or more) levels
  "setosa < virginica & virginica < versicolor",
  "virginica < setosa & setosa < versicolor",
  "setosa < versicolor & versicolor < virginica"
)
```

With the default factor coding, this looks like this:

```{r, eval=FALSE}
contrasts(df$Species) <- contr.sum

fit_sum <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian()
)

em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means

bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp)
```

```{r, echo=FALSE}
contrasts(df$Species)[, ] <- contr.sum(3)

fit_sum <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian(),
  refresh = 0
)

em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means

bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp)
```

***What happened???***

1. The comparisons of 2 levels all have a prior of ~0.5, as expected.
2. The comparisons of 3 levels have different priors, depending on the order restriction - i.e., **some orders are *a priori* more likely than others!!!**

Again, this is solved by using the *orthonormal* factor coding (from above).

```{r, eval=FALSE}
contrasts(df$Species) <- contr.orthonorm

fit_bayes <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian()
)

em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means

bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp)
```

```{r, echo=FALSE}
contrasts(df$Species)[, ] <- contr.orthonorm(3)

fit_bayes <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian(),
  refresh = 0
)

em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means

bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp)
```

### Conclusion

When comparing the results from the two factor coding schemes, we find:

1. In both cases, the estimated (posterior) means are quite similar (if not identical).
2. The priors and Bayes factors differ between the two schemes.
3. Only with `contr.orthonorm` is the prior distribution of the differences, or of the order, of 3 (or more) means balanced.
# References

bayestestR/vignettes/mediation.Rmd

---
title: "Mediation Analysis using Bayesian Regression Models"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, mediation]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Mediation Analysis using Bayesian Regression Models}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r, SETTINGS-knitr, echo = FALSE, warning = FALSE, message = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  dev = "png",
  fig.width = 7,
  fig.height = 5,
  message = FALSE,
  warning = FALSE
)
options(width = 800)

if (!requireNamespace("mediation", quietly = TRUE) ||
  !requireNamespace("httr", quietly = TRUE) ||
  !requireNamespace("lavaan", quietly = TRUE) ||
  !requireNamespace("brms", quietly = TRUE) ||
  !requireNamespace("rstanarm", quietly = TRUE) ||
  !requireNamespace("insight", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}
```

This vignette demonstrates the `mediation()` function. Before we start, we fit some models, including a mediation object from the _mediation_ package and a structural equation modelling approach with the _lavaan_ package, both of which we use for comparison with _brms_ and _rstanarm_.

## Mediation Analysis in brms and rstanarm

```{r}
library(bayestestR)
library(mediation)
library(brms)
library(rstanarm)

# load sample data
data(jobs)
set.seed(123)

# linear models, for mediation analysis
b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs)
b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs)

# mediation analysis, for comparison with brms
m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek")
```

```{r eval=FALSE}
# Fit Bayesian mediation model in brms
f1 <- bf(job_seek ~ treat + econ_hard + sex + age)
f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age)
m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4)
```

```{r echo=FALSE}
m2 <- insight::download_model("brms_mv_6")
```

```{r eval=FALSE}
# Fit Bayesian mediation model in rstanarm
m3 <- stan_mvmer(
  list(
    job_seek ~ treat + econ_hard + sex + age + (1 | occp),
    depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp)
  ),
  data = jobs,
  cores = 4,
  refresh = 0
)
```

```{r echo=FALSE}
m3 <- insight::download_model("stanmvreg_2")
```

`mediation()` is a summary function, especially for mediation analysis, i.e. for multivariate response models with causal mediation effects.

In the models `m2` and `m3`, `treat` is the treatment effect and `job_seek` is the mediator effect. For the *brms* model (`m2`), `f1` describes the mediator model and `f2` describes the outcome model. This is similar for the *rstanarm* model.

`mediation()` returns a data frame with information on the _direct effect_ (the median value of the posterior samples from the treatment of the outcome model), the _mediator effect_ (the median value of the posterior samples from the mediator of the outcome model), the _indirect effect_ (the median value of the multiplication of the posterior samples from the mediator of the outcome model and the posterior samples from the treatment of the mediation model) and the _total effect_ (the median value of the sums of the posterior samples used for the direct and indirect effects). The _proportion mediated_ is the indirect effect divided by the total effect.

The simplest call just needs the model object.
```{r, message=TRUE}
# for brms
mediation(m2)

# for rstanarm
mediation(m3)
```

Typically, `mediation()` finds the treatment and mediator variables automatically. If this does not work, use the `treatment` and `mediator` arguments to specify the related variable names. For all values, the 89% credible intervals are calculated by default. Use `ci` to calculate a different interval.

## Comparison to the mediation package

Here is a comparison with the _mediation_ package. Note that the `summary()`-output of the _mediation_ package shows the indirect effect first, followed by the direct effect.

```{r}
summary(m1)

mediation(m2, ci = .95)

mediation(m3, ci = .95)
```

If you want to calculate mean instead of median values from the posterior samples, use the `centrality`-argument. Furthermore, there is a `print()`-method, which allows printing more digits.

```{r, message=TRUE}
m <- mediation(m2, centrality = "mean", ci = .95)
print(m, digits = 4)
```

As you can see, the results are similar to what the _mediation_ package produces for non-Bayesian models.

## Comparison to SEM from the lavaan package

Finally, we also compare the results to a SEM model, using *lavaan*. This example should demonstrate how to "translate" the same model into different packages or modeling approaches.

```{r}
library(lavaan)
data(jobs)
set.seed(1234)

model <- ' # direct effects
  depress2 ~ c1*treat + c2*econ_hard + c3*sex + c4*age + b*job_seek

  # mediation
  job_seek ~ a1*treat + a2*econ_hard + a3*sex + a4*age

  # indirect effects (a*b)
  indirect_treat := a1*b
  indirect_econ_hard := a2*b
  indirect_sex := a3*b
  indirect_age := a4*b

  # total effects
  total_treat := c1 + (a1*b)
  total_econ_hard := c2 + (a2*b)
  total_sex := c3 + (a3*b)
  total_age := c4 + (a4*b)
'
m4 <- sem(model, data = jobs)
summary(m4)

# just to have the numbers at hand, so you don't need to scroll up
mediation(m2, ci = .95)
```

The summary output from *lavaan* is longer, but we can find the related numbers quite easily:

- the _direct effect_ of treatment is `treat (c1)`, which is `-0.040`
- the _indirect effect_ of treatment is `indirect_treat`, which is `-0.016`
- the _mediator effect_ of job_seek is `job_seek (b)`, which is `-0.240`
- the _total effect_ is `total_treat`, which is `-0.056`

bayestestR/vignettes/example3.Rmd

---
title: "3. Become a Bayesian master"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{3. Become a Bayesian master}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)
```

```{r echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg")
```

## Mixed Models

TO BE CONTINUED.

### Priors

TO BE CONTINUED.

## What's next?
The journey to become a true Bayesian master is not yet over. It is merely the beginning. It is now time to leave the `bayestestR` universe and apply the Bayesian framework in a variety of other statistical contexts:

- [**Marginal means**](https://easystats.github.io/modelbased/articles/estimate_means.html)
- [**Contrast analysis**](https://easystats.github.io/modelbased/articles/estimate_contrasts.html)
- [**Testing Contrasts from Bayesian Models with 'emmeans' and 'bayestestR'**](https://easystats.github.io/blog/posts/bayestestr_emmeans/)

bayestestR/vignettes/example1.Rmd

---
title: "1. Initiation to Bayesian models"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{1. Initiation to Bayesian models}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r , include=FALSE}
library(knitr)
library(insight)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)

if (!requireNamespace("rstanarm", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

format_percent <- function(x, digits = 0, ...) {
  paste0(format_value(x * 100, digits = digits, ...), "%")
}
```

Now that you've read the [**Get started**](https://easystats.github.io/bayestestR/articles/bayestestR.html) section, let's dive into the **subtleties of Bayesian modelling using R**.

## Loading the packages

Once you've [installed](https://easystats.github.io/bayestestR/articles/bayestestR.html#bayestestr-installation) the necessary packages, we can load `rstanarm` (to fit the models), `bayestestR` (to compute useful indices), and `insight` (to access the parameters).

```{r }
library(rstanarm)
library(bayestestR)
library(insight)
```

## Simple linear (regression) model

We will begin by conducting a simple linear regression to test the relationship between `Petal.Length` (our predictor, or *independent*, variable) and `Sepal.Length` (our response, or *dependent*, variable) from the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset which is included by default in R.

### Fitting the model

Let's start by fitting a **frequentist** version of the model, just to have a reference point:

```{r }
model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)
```

We can also zoom in on the parameters of interest to us:

```{r}
insight::get_parameters(model)
```

In this model, the linear relationship between `Petal.Length` and `Sepal.Length` is **positive and significant** ($\beta = 0.41, t(148) = 21.6, p < .001$). This means that for each one-unit increase in `Petal.Length` (the predictor), you can expect `Sepal.Length` (the response) to increase by **0.41**.
This effect can be visualized by plotting the predictor values on the `x` axis and the response values as `y` using the `ggplot2` package:

```{r }
library(ggplot2) # Load the package

# The ggplot function takes the data as argument, and then the variables
# related to aesthetic features such as the x and y axes.
ggplot(iris, aes(x = Petal.Length, y = Sepal.Length)) +
  geom_point() + # This adds the points
  geom_smooth(method = "lm") # This adds a regression line
```

Now let's fit a **Bayesian version** of the model by using the `stan_glm` function in the `rstanarm` package:

```{r , eval=FALSE}
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)
```

```{r echo=FALSE, comment=NA, results='hide'}
library(rstanarm)
set.seed(333)
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)
```

You can see the sampling algorithm being run.

### Extracting the posterior

Once it is done, let us extract the parameters (*i.e.*, coefficients) of the model.

```{r , eval=FALSE}
posteriors <- insight::get_parameters(model)

head(posteriors) # Show the first 6 rows
```

```{r , echo=FALSE}
posteriors <- insight::get_parameters(model)

head(posteriors) # Show the first 6 rows
```

As we can see, the parameters take the form of a lengthy dataframe with two columns, corresponding to the `intercept` and the effect of `Petal.Length`. These columns contain the **posterior distributions** of these two parameters. In simple terms, the posterior distribution is a set of different plausible values for each parameter. Contrast this with the result we saw from the frequentist linear regression model using `lm`, where the results had **single values** for each effect of the model, and not a distribution of values. This is one of the most important differences between these two frameworks.

#### About posterior draws

Let's look at the length of the posteriors.

```{r }
nrow(posteriors) # Size (number of rows)
```

**Why is the size 4000, and not more or less?**

First of all, these observations (the rows) are usually referred to as **posterior draws**. The underlying idea is that the Bayesian sampling algorithm (*e.g.*, **Markov Chain Monte Carlo - MCMC**) will *draw* from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. **Therefore, the more draws you have, the better your estimation of the posterior distribution**. However, increased draws also mean longer computation time.

If we look at the documentation (`?sampling`) for `rstanarm`'s `"sampling"` algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are **4** `chains` (you can see it as distinct sampling runs), that each create **2000** `iter` (draws). However, only half of these iterations are kept, as half are used for `warm-up` (the convergence of the algorithm). Thus, the total for posterior draws equals **`4 chains * (2000 iterations - 1000 warm-up) = 4000`**.
We can change that, for instance:

```{r , eval=FALSE}
model <- stan_glm(Sepal.Length ~ Petal.Length,
  data = iris,
  chains = 2, iter = 1000, warmup = 250
)

nrow(insight::get_parameters(model)) # Size (number of rows)
```

```{r echo=FALSE, comment=NA}
model <- stan_glm(Sepal.Length ~ Petal.Length,
  data = iris,
  chains = 2, iter = 1000, warmup = 250, refresh = 0
)

nrow(insight::get_parameters(model)) # Size (number of rows)
```

In this case, as would be expected, we have **`2 chains * (1000 iterations - 250 warm-up) = 1500`** posterior draws. But let's keep our first model with the default setup (as it has more draws).

#### Visualizing the posterior distribution

Now that we've understood where these values come from, let's look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of `Petal.Length`.

```{r }
ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange")
```

This distribution represents the [probability](https://en.wikipedia.org/wiki/Probability_density_function) (the `y` axis) of different effects (the `x` axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about **0.35 to 0.50**, with the bulk of it being at around **0.41**.

> **Congrats! You've just described your first posterior distribution.**

And this is the heart of Bayesian analysis. We don't need *p*-values, *t*-values, or degrees of freedom. **Everything we need is contained within this posterior distribution**.

Our description above is consistent with the values obtained from the frequentist regression (which resulted in a $\beta$ of **0.41**). This is reassuring! Indeed, **in most cases, Bayesian analysis does not drastically differ from the frequentist results** or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe.

We can now go ahead and **precisely characterize** this posterior distribution.

### Describing the Posterior

Unfortunately, it is often not practical to report the whole posterior distributions as graphs. We need to find a **concise way to summarize it**. We recommend describing the posterior distribution with **3 elements**:

1. A **point-estimate** which is a one-value summary (similar to the $\beta$ in frequentist regressions).
2. A **credible interval** representing the associated uncertainty.
3. Some **indices of significance**, giving information about the relative importance of this effect.

#### Point-estimate

**What single value can best represent my posterior distribution?**

Centrality indices, such as the *mean*, the *median*, or the *mode* are usually used as point-estimates. But what's the difference between them?

Let's answer this by first inspecting the **mean**:

```{r }
mean(posteriors$Petal.Length)
```

This is close to the frequentist $\beta$. But, as we know, the mean is quite sensitive to outliers or extreme values. Maybe the **median** could be more robust?

```{r }
median(posteriors$Petal.Length)
```

Well, this is **very close to the mean** (and identical when rounding the values). Maybe we could take the **mode**, that is, the *peak* of the posterior distribution? In the Bayesian framework, this value is called the **Maximum A Posteriori (MAP)**.
Let's see:

```{r }
map_estimate(posteriors$Petal.Length)
```

**They are all very close!** Let's visualize these values on the posterior distribution:

```{r }
ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange") +
  # The mean in blue
  geom_vline(xintercept = mean(posteriors$Petal.Length), color = "blue", size = 1) +
  # The median in red
  geom_vline(xintercept = median(posteriors$Petal.Length), color = "red", size = 1) +
  # The MAP in purple
  geom_vline(xintercept = map_estimate(posteriors$Petal.Length), color = "purple", size = 1)
```

Well, all these values give very similar results. Thus, **we will choose the median**, as this value has a direct meaning from a probabilistic perspective: **there is a 50\% chance that the true effect is higher and a 50\% chance that the effect is lower** (as it divides the distribution in two equal parts).

#### Uncertainty

Now that we have a point-estimate, we have to **describe the uncertainty**. We could compute the range:

```{r }
range(posteriors$Petal.Length)
```

But does it make sense to include all these extreme values? Probably not. Thus, we will compute a [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html). Long story short, it's kind of similar to a frequentist **confidence interval**, but easier to interpret and easier to compute — *and it makes more sense*.

We will compute this **credible interval** based on the [Highest Density Interval (HDI)](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis). It will give us the range containing the 89\% most probable effect values. **Note that we will use 89\% CIs instead of 95\%** CIs (as in the frequentist framework), as the 89\% level gives more [stable results](https://easystats.github.io/bayestestR/articles/credible_interval.html#why-is-the-default-89) [@kruschke2014doing] and reminds us about the arbitrariness of such conventions [@mcelreath2018statistical].

```{r }
hdi(posteriors$Petal.Length, ci = 0.89)
```

Nice, so we can conclude that **the effect has an 89\% chance of falling within the `[0.38, 0.44]` range**. We have just computed the two most important pieces of information for describing our effects.

#### Effect significance

However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know if this effect has significance in practical or statistical terms, or in other words, whether the effect is **important**. For instance, is the effect different from 0?

So how do we **assess the *significance* of an effect?** How can we do this? Well, in this particular case, it is very eloquent: **all possible effect values (*i.e.*, the whole posterior distribution) are positive and over 0.35, which is already substantial evidence the effect is not zero**.

But still, we want some objective decision criterion, to say if **yes or no the effect is 'significant'**. One approach, similar to the frequentist framework, would be to see if the **Credible Interval** contains 0. If it is not the case, that would mean that our **effect is 'significant'**.

But this index is not very fine-grained, no? **Can we do better? Yes!**

## A linear model with a categorical predictor

Imagine for a moment you are interested in how the weight of chickens varies depending on two different **feed types**.
For this example, we will start by selecting from the `chickwts` dataset (available in base R) two feed types of interest for us (*we do have peculiar interests*): **meat meals** and **sunflowers**.

### Data preparation and model fitting

```{r }
library(dplyr)

# We keep only rows for which feed is meatmeal or sunflower
data <- filter(chickwts, feed %in% c("meatmeal", "sunflower"))
```

Let's run another Bayesian regression to predict the **weight** with the **two types of feed**.

```{r , eval=FALSE}
model <- stan_glm(weight ~ feed, data = data)
```

```{r echo=FALSE, comment=NA, results='hide'}
model <- stan_glm(weight ~ feed, data = data)
```

### Posterior description

```{r }
posteriors <- insight::get_parameters(model)

ggplot(posteriors, aes(x = feedsunflower)) +
  geom_density(fill = "red")
```

This represents the **posterior distribution of the difference** between `meatmeal` and `sunflowers`. It seems that the difference is **positive** (since the values are concentrated on the right side of 0). Eating sunflowers makes you more fat (*at least, if you're a chicken*). But, **by how much?**

Let us compute the **median** and the **CI**:

```{r }
median(posteriors$feedsunflower)
hdi(posteriors$feedsunflower)
```

It makes you fat by around 51 grams (the median). However, the uncertainty is quite high: **there is an 89\% chance that the difference between the two feed types is between 14 and 91.**

> **Is this effect different from 0?**

### ROPE Percentage

Testing whether this distribution is different from 0 doesn't make sense, as 0 is a single value (*and the probability that any distribution is different from a single value is infinite*). However, one way to assess **significance** could be to define an area *around* 0, which we will consider as *practically equivalent* to zero (*i.e.*, absence of, or a negligible, effect). This is called the [**Region of Practical Equivalence (ROPE)**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), and is one way of testing the significance of parameters.

**How can we define this region?**

> ***Driing driiiing*** -- ***The easystats team speaking. How can we help?*** -- ***I am Prof. Sanders. An expert in chicks... I mean chickens. Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.***

Well, that's convenient. Now we know that we can define the ROPE as the `[-20, 20]` range. All effects within this range are considered as *null* (negligible). We can now compute the **proportion of the 89\% most probable values (the 89\% CI) which are not null**, *i.e.*, which are outside this range.

```{r }
rope(posteriors$feedsunflower, range = c(-20, 20), ci = 0.89)
```

**5\% of the 89\% CI can be considered as null**. Is that a lot? Based on our [**guidelines**](https://easystats.github.io/bayestestR/articles/guidelines.html), yes, it is too much. **Based on this particular definition of ROPE**, we conclude that this effect is not significant (the probability of being negligible is too high).

That said, to be honest, I have **some doubts about this Prof. Sanders**. I don't really trust **his definition of ROPE**. Is there a more **objective** way of defining it?

```{r echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.jpg")
```

**Yes!** One common practice is, for instance, to use a **tenth (`1/10 = 0.1`) of the standard deviation (SD)** of the response variable, which can be considered as a "negligible" effect size [@cohen1988statistical].

```{r }
rope_value <- 0.1 * sd(data$weight)
rope_range <- c(-rope_value, rope_value)
rope_range
```

Let's redefine our ROPE as the region within the `[-6.2, 6.2]` range. **Note that this can be directly obtained by the `rope_range` function :)**

```{r }
rope_value <- rope_range(model)
rope_value
```

Let's recompute the **percentage in ROPE**:

```{r }
rope(posteriors$feedsunflower, range = rope_range, ci = 0.89)
```

With this reasonable definition of ROPE, we observe that 89\% of the posterior distribution of the effect does **not** overlap with the ROPE. Thus, we can conclude that **the effect is significant** (in the sense of *important* enough to be noted).

### Probability of Direction (pd)

Maybe we are not interested in whether the effect is non-negligible. Maybe **we just want to know if this effect is positive or negative**. In this case, we can simply compute the proportion of the posterior that is positive, no matter the "size" of the effect.

```{r }
n_positive <- posteriors %>%
  filter(feedsunflower > 0) %>% # select only positive values
  nrow() # Get length
n_positive / nrow(posteriors) * 100
```

We can conclude that **the effect is positive with a probability of 98\%**. We call this index the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html). It can, in fact, be computed more easily with the following:

```{r }
p_direction(posteriors$feedsunflower)
```

Interestingly, it so happens that **this index is usually highly correlated with the frequentist *p*-value**. We could almost roughly infer the corresponding *p*-value with a simple transformation:

```{r , eval=TRUE}
pd <- 97.82
onesided_p <- 1 - pd / 100
twosided_p <- onesided_p * 2
twosided_p
```

If we ran our model in the frequentist framework, we should approximately observe an effect with a *p*-value of `r round(twosided_p, digits=3)`. **Is that true?**

#### Comparison to frequentist

```{r }
summary(lm(weight ~ feed, data = data))
```

The frequentist model tells us that the difference is **positive and significant** ($\beta = 52, p = 0.04$).

**Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.**

## All with one function

And yet, I agree, it was a bit **tedious** to extract and compute all the indices. **But what if I told you that we can do all of this, and more, with only one function?**

> **Behold, `describe_posterior`!**

This function computes all of the aforementioned indices, and can be run directly on the model:

```{r }
describe_posterior(model, test = c("p_direction", "rope", "bayesfactor"))
```

**Tada!** There we have it! The **median**, the **CI**, the **pd** and the **ROPE percentage**!

Understanding and describing posterior distributions is just one aspect of Bayesian modelling. **Are you ready for more?!** [**Click here**](https://easystats.github.io/bayestestR/articles/example2.html) to see the next example.
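As a closing aside (not part of the original walkthrough): the manual pd-to-*p* conversion shown above is also available as a helper in `bayestestR` itself. A minimal sketch, assuming `pd_to_p()` expects the pd as a fraction rather than a percentage:

```{r, eval=FALSE}
# Convert a pd of 97.82% to an approximate two-sided frequentist p-value
pd_to_p(0.9782, direction = "two-sided") # ~ 0.044
```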
## References

bayestestR/build/vignette.rds
bayestestR/build/partial.rdb
bayestestR/tests/
bayestestR/tests/testthat/
bayestestR/tests/testthat/test-simulate_data.R

test_that("simulate_correlation", {
  set.seed(333)
  data <- simulate_correlation(r = 0.5, n = 50)
  expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tolerance = 0.001)

  data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7))
  expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tolerance = 0.001)
  expect_equal(c(mean(data$V1), sd(data$V1)), c(0, 0.7), tolerance = 0.001)
  expect_equal(c(mean(data$V2), sd(data$V2)), c(1, 1.7), tolerance = 0.001)

  cor_matrix <- matrix(c(
    1.0, 0.2, 0.4,
    0.2, 1.0, 0.3,
    0.4, 0.3, 1.0
  ),
  nrow = 3
  )
  data <- simulate_correlation(r = cor_matrix)
  expect_equal(matrix(cor(data), nrow = 3), cor_matrix, tolerance = 0.001)
})

bayestestR/tests/testthat/test-hdi.R

if (require("rstanarm") && require("brms") && require("httr") && require("testthat") && require("BayesFactor")) {

  # numeric -------------------------------
  test_that("hdi", {
    expect_equal(hdi(distribution_normal(1000), ci = .90)$CI_low[1], -1.64, tolerance = 0.02)
    expect_equal(nrow(hdi(distribution_normal(1000), ci = c(.80, .90, .95))), 3, tolerance = 0.01)
    expect_equal(hdi(distribution_normal(1000), ci = 1)$CI_low[1], -3.09, tolerance = 0.02)
    expect_equal(nchar(capture.output(print(hdi(distribution_normal(1000))))), 22)
    expect_equal(length(capture.output(print(hdi(distribution_normal(1000), ci = c(.80, .90))))), 5)

    expect_warning(hdi(c(2, 3, NA)))
    expect_warning(hdi(c(2, 3)))
    expect_warning(hdi(distribution_normal(1000), ci = 0.0000001))
    expect_warning(hdi(distribution_normal(1000), ci = 950))
    expect_warning(hdi(c(distribution_normal(1000, 0, 1), distribution_normal(1000, 6, 1), distribution_normal(1000, 12, 1)), ci = .10))
  })

  .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes"

  if (.runThisTest) {
    if (require("insight")) {
      # stanreg ---------------------------
      m <- insight::download_model("stanreg_merMod_5")
      p <- insight::get_parameters(m, effects = "all")

      test_that("ci", {
        expect_equal(
          hdi(m, ci = c(.5, .8), effects = "all")$CI_low,
          hdi(p, ci = c(.5, .8))$CI_low,
          tolerance = 1e-3
        )
      })

      # brms ---------------------------
      m <- insight::download_model("brms_zi_3")
      p <- insight::get_parameters(m, effects = "all", component = "all")

      test_that("rope", {
        expect_equal(
          hdi(m, ci = c(.5, .8), effects = "all", component = "all")$CI_low,
          hdi(p, ci = c(.5, .8))$CI_low,
          tolerance = 1e-3
        )
      })

      # BayesFactor ---------------------------
      mod_bf <- proportionBF(y = 15, N = 25, p = .5)
      p_bf <- insight::get_parameters(mod_bf)

      test_that("ci - BayesFactor", {
        expect_equal(
          hdi(mod_bf, ci = c(.5, .8), effects = "all", component = "all")$CI_low,
          hdi(p_bf, ci = c(.5, .8))$CI_low,
          tolerance = 0.1
        )
      })
    }
  }
}

bayestestR/tests/testthat/test-check_prior.R

.runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes"

if (.runThisTest && require("rstanarm") && require("testthat") && require("bayestestR") && require("brms")) {
  skip_on_cran()

  # stanreg --------------------------
  set.seed(333)
  model1 <- insight::download_model("stanreg_lm_1")
  expect_equal(
    check_prior(model1)$Prior_Quality,
    c("informative", "uninformative")
  )
  expect_equal(
    check_prior(model1, method = "lakeland")$Prior_Quality,
    c("informative", "informative")
  )

  # brms -----------------------------
  # all `brms` examples in circus have uniform prior distribution, so
  # need to use a custom example here
  set.seed(333)
  model2 <- brm(rating ~ period + carry + cs(treat),
    data = inhaler,
    family = sratio("logit"),
    prior = set_prior("normal(0,5)"),
    chains = 2, silent = TRUE, refresh = 0
  )

  expect_equal(
    check_prior(model2)$Prior_Quality,
    c(
      "uninformative", "informative", "informative", "uninformative",
      "uninformative", "not determinable", "not determinable", "not determinable"
    )
  )

  expect_equal(
    check_prior(model2, method = "lakeland")$Prior_Quality,
    c(
      "informative", "misinformative", "informative", "informative",
      "informative", "not determinable", "not determinable", "not determinable"
    )
  )
}

bayestestR/tests/testthat/test-point_estimate.R

if (require("rstanarm", quietly = TRUE) && require("brms", quietly = TRUE)) {
  if (require("insight")) {
    m <- insight::download_model("stanreg_merMod_5")
    p <- insight::get_parameters(m, effects = "all")

    test_that("point_estimate", {
      expect_equal(
        point_estimate(m, effects = "all")$Median,
        point_estimate(p)$Median,
        tolerance = 1e-3
      )
    })

    m <- insight::download_model("brms_zi_3")
    p <- insight::get_parameters(m, effects = "all", component = "all")

    test_that("point_estimate", {
      expect_equal(
        point_estimate(m, effects = "all", component = "all")$Median,
        point_estimate(p)$Median,
        tolerance = 1e-3
      )
    })
  }
}

bayestestR/tests/testthat/test-emmGrid.R

if (require("rstanarm") && require("testthat") && require("bayestestR") && require("emmeans")) {
  set.seed(300)
  model <- stan_glm(extra ~ group, data = sleep, refresh = 0)

  em_ <- emmeans(model, ~group)
  c_ <- pairs(em_)
  emc_ <- emmeans(model, pairwise ~ group)
  all_ <- rbind(em_, c_)
  all_summ <- summary(all_)

  set.seed(4)
  model_p <- unupdate(model, verbose = FALSE)
  set.seed(300)

  # estimate + hdi ----------------------------------------------------------
  test_that("emmGrid hdi", {
    xhdi <- hdi(all_, ci = 0.95)
    expect_equal(xhdi$CI_low, all_summ$lower.HPD, tolerance = 0.1)
    expect_equal(xhdi$CI_high, all_summ$upper.HPD, tolerance = 0.1)

    xhdi2 <- hdi(emc_, ci = 0.95)
    expect_equal(xhdi$CI_low, xhdi2$CI_low)
  })

  test_that("emmGrid point_estimate", {
    xpest <- point_estimate(all_, centrality = "all", dispersion = TRUE)
    expect_equal(xpest$Median, all_summ$emmean, tolerance = 0.1)

    xpest2 <- point_estimate(emc_, centrality = "all", dispersion = TRUE)
    expect_equal(xpest$Median, xpest2$Median)
  })

  # Basics ------------------------------------------------------------------
  test_that("emmGrid ci", {
    xci <- ci(all_, ci = 0.9)
    expect_equal(length(xci$CI_low), 3)
    expect_equal(length(xci$CI_high), 3)
  })

  # test_that("emmGrid eti", {
  #   xeti <- eti(all_, ci = 0.9)
  #   expect_equal(length(xeti$CI_low), 3)
  #   expect_equal(length(xeti$CI_high), 3)
  # })

  test_that("emmGrid equivalence_test", {
    xeqtest <- equivalence_test(all_, ci = 0.9, range = c(-0.1, 0.1))
    expect_equal(length(xeqtest$ROPE_Percentage), 3)
    expect_equal(length(xeqtest$ROPE_Equivalence), 3)
  })

  test_that("emmGrid estimate_density", {
    xestden <- estimate_density(c_, method = "logspline", precision = 5)
    expect_equal(length(xestden$x), 5)
  })

  test_that("emmGrid map_estimate", {
    xmapest <- map_estimate(all_, method = "kernel")
    expect_equal(length(xmapest$MAP_Estimate), 3)
  })

  test_that("emmGrid p_direction", {
    xpd <- p_direction(all_, method = "direct")
    expect_equal(length(xpd$pd), 3)
  })

  test_that("emmGrid p_map", {
    xpmap <- p_map(all_, precision = 2^9)
    expect_equal(length(xpmap$p_MAP), 3)
  })

  test_that("emmGrid p_rope", {
    xprope <- p_rope(all_, range = c(-0.1, 0.1))
    expect_equal(length(xprope$p_ROPE), 3)
  })

  test_that("emmGrid p_significance", {
    xsig <- p_significance(all_, threshold = c(-0.1, 0.1))
    expect_equal(length(xsig$ps), 3)
  })

  test_that("emmGrid rope", {
    xrope <- rope(all_, range = "default", ci = .9)
    expect_equal(length(xrope$ROPE_Percentage), 3)
  })

  # describe_posterior ------------------------------------------------------
  test_that("emmGrid describe_posterior", {
    expect_equal(
      describe_posterior(all_)$median,
      describe_posterior(emc_)$median
    )

    expect_equal(
      describe_posterior(all_, bf_prior = model_p, test = "bf")$BF,
      describe_posterior(emc_, bf_prior = model_p, test = "bf")$BF
    )
  })

  # BFs ---------------------------------------------------------------------
  test_that("emmGrid bayesfactor_parameters", {
    skip_on_cran()

    set.seed(4)
    expect_equal(
      bayesfactor_parameters(all_, prior = model, verbose = FALSE),
      bayesfactor_parameters(all_, prior = model_p)
    )

    emc_p <- emmeans(model_p, pairwise ~ group)
    xbfp <- bayesfactor_parameters(all_, prior = model_p)
    xbfp2 <- bayesfactor_parameters(emc_, prior = model_p)
    xbfp3 <- bayesfactor_parameters(emc_, prior = emc_p)
    expect_equal(xbfp$BF, xbfp2$BF)
    expect_equal(xbfp$BF, xbfp3$BF)

    expect_warning(bayesfactor_parameters(all_))

    # error - cannot deal with regrid / transform
    expect_error(bayesfactor_parameters(regrid(all_), prior = model))
  })

  test_that("emmGrid bayesfactor_restricted", {
    skip_on_cran()
    skip_on_ci()

    set.seed(4)
    hyps <- c("`1` < `2`", "`1` < 0")
    xrbf <- bayesfactor_restricted(em_, prior = model_p, hypothesis = hyps)
    expect_equal(length(xrbf$log_BF), 2)
    expect_equal(length(xrbf$p_prior), 2)
    expect_equal(length(xrbf$p_posterior), 2)
    expect_warning(bayesfactor_restricted(em_, hypothesis = hyps))

    xrbf2 <- bayesfactor_restricted(emc_, prior = model_p, hypothesis = hyps)
    expect_equal(xrbf, xrbf2)
  })

  test_that("emmGrid si", {
    skip_on_cran()

    set.seed(4)
    xrsi <- si(all_, prior = model_p)
    expect_equal(length(xrsi$CI_low), 3)
    expect_equal(length(xrsi$CI_high), 3)

    xrsi2 <- si(emc_, prior = model_p)
    expect_equal(xrsi$CI_low, xrsi2$CI_low)
    expect_equal(xrsi$CI_high, xrsi2$CI_high)
  })

  # For non linear models ---------------------------------------------------
  set.seed(333)
  df <- data.frame(
    G = rep(letters[1:3], each = 2),
    Y = rexp(6)
  )

  fit_bayes <- stan_glm(Y ~ G,
    data = df,
    family = Gamma(link = "identity"),
    refresh = 0
  )
  fit_bayes_prior <- unupdate(fit_bayes, verbose = FALSE)

  bayes_sum <- emmeans(fit_bayes, ~G)
  bayes_sum_prior <- emmeans(fit_bayes_prior, ~G)

  # test_that("emmGrid bayesfactor_restricted2", {
  #   skip_on_travis()
  #   skip_on_cran()
  #   skip_on_ci()
  #
  #   hyps <- c("a < b", "b < c")
  #   xrbf1 <- bayesfactor_restricted(bayes_sum, fit_bayes, hypothesis = hyps)
  #   xrbf2 <- bayesfactor_restricted(bayes_sum, bayes_sum_prior, hypothesis = hyps)
  #
  #   expect_equal(xrbf1, xrbf2, tolerance = 0.1)
  # })

  test_that("emmGrid bayesfactor_parameters", {
    set.seed(333)
    xsdbf1 <- bayesfactor_parameters(bayes_sum, prior = fit_bayes)
    xsdbf2 <- bayesfactor_parameters(bayes_sum, prior = bayes_sum_prior)

    expect_equal(xsdbf1$log_BF, xsdbf2$log_BF, tolerance = 0.1)
  })

  # link vs response
  test_that("emmGrid bayesfactor_parameters / describe w/ nonlinear models", {
{ skip_on_cran() model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0 ) probs <- emmeans(model, "mpg", type = "resp") link <- emmeans(model, "mpg") probs_summ <- summary(probs) link_summ <- summary(link) xhdi <- hdi(probs, ci = 0.95) xpest <- point_estimate(probs, centrality = "median", dispersion = TRUE) expect_equal(xhdi$CI_low, probs_summ$lower.HPD, tolerance = 0.1) expect_equal(xhdi$CI_high, probs_summ$upper.HPD, tolerance = 0.1) expect_equal(xpest$Median, probs_summ$prob, tolerance = 0.1) xhdi <- hdi(link, ci = 0.95) xpest <- point_estimate(link, centrality = "median", dispersion = TRUE) expect_equal(xhdi$CI_low, link_summ$lower.HPD, tolerance = 0.1) expect_equal(xhdi$CI_high, link_summ$upper.HPD, tolerance = 0.1) expect_equal(xpest$Median, link_summ$emmean, tolerance = 0.1) }) } bayestestR/tests/testthat/test-rope.R0000644000175000017500000001017014133230441017572 0ustar nileshnileshif (suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("testthat", quietly = TRUE) && require("rstanarm", quietly = TRUE) && require("brms", quietly = TRUE)) { test_that("rope", { expect_equal(as.numeric(rope(distribution_normal(1000, 0, 1), verbose = FALSE)), 0.084, tolerance = 0.01) expect_equal(equivalence_test(distribution_normal(1000, 0, 1))$ROPE_Equivalence, "Undecided") expect_equal(length(capture.output(print(equivalence_test(distribution_normal(1000))))), 9) expect_equal(length(capture.output(print(equivalence_test(distribution_normal(1000), ci = c(0.8, 0.9) )))), 14) expect_equal(as.numeric(rope(distribution_normal(1000, 2, 0.01), verbose = FALSE)), 0, tolerance = 0.01) expect_equal(equivalence_test(distribution_normal(1000, 2, 0.01))$ROPE_Equivalence, "Rejected") expect_equal(as.numeric(rope(distribution_normal(1000, 0, 0.001), verbose = FALSE)), 1, tolerance = 0.01) expect_equal(equivalence_test(distribution_normal(1000, 0, 0.001))$ROPE_Equivalence, "Accepted") expect_equal(equivalence_test(distribution_normal(1000, 0, 0.001), ci = 1)$ROPE_Equivalence, "Accepted") # print(rope(rnorm(1000, mean = 0, sd = 3), ci = .5)) expect_equal(rope(rnorm(1000, mean = 0, sd = 3), ci = c(.1, .5, .9), verbose = FALSE)$CI, c(.1, .5, .9)) x <- equivalence_test(distribution_normal(1000, 1, 1), ci = c(.50, .99)) expect_equal(x$ROPE_Percentage[2], 0.0494, tolerance = 0.01) expect_equal(x$ROPE_Equivalence[2], "Undecided") expect_error(rope(distribution_normal(1000, 0, 1), range = c(0.0, 0.1, 0.2))) set.seed(333) expect_s3_class(rope(distribution_normal(1000, 0, 1), verbose = FALSE), "rope") expect_error(rope(distribution_normal(1000, 0, 1), range = c("A", 0.1))) expect_equal(as.numeric(rope(distribution_normal(1000, 0, 1), range = c(-0.1, 0.1) )), 0.084, tolerance = 0.01 ) }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("rope", { expect_equal( # fix range to -.1/.1, to compare to data frame method rope(m, range = c(-.1, .1), effects = "all", verbose = FALSE)$ROPE_Percentage, rope(p, verbose = FALSE)$ROPE_Percentage, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("rope", { expect_equal( rope(m, effects = "all", component = "all", verbose = FALSE)$ROPE_Percentage, rope(p, verbose = FALSE)$ROPE_Percentage, tolerance = 1e-3 ) }) } } } .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" # if 
(.runThisTest && require("brms", quietly = TRUE)) { # set.seed(123) # model <- brm(mpg ~ wt + gear, data = mtcars, iter = 500) # rope <- rope(model, verbose = FALSE) # # test_that("rope (brms)", { # expect_equal(rope$ROPE_high, -rope$ROPE_low, tolerance = 0.01) # expect_equal(rope$ROPE_high[1], 0.6026948) # expect_equal(rope$ROPE_Percentage, c(0.00, 0.00, 0.50), tolerance = 0.1) # }) # # model <- brm(mvbind(mpg, disp) ~ wt + gear, data = mtcars, iter = 500) # rope <- rope(model, verbose = FALSE) # # test_that("rope (brms, multivariate)", { # expect_equal(rope$ROPE_high, -rope$ROPE_low, tolerance = 0.01) # expect_equal(rope$ROPE_high[1], 0.6026948, tolerance = 0.01) # expect_equal(rope$ROPE_high[4], 12.3938694, tolerance = 0.01) # expect_equal( # rope$ROPE_Percentage, # c(0, 0, 0.493457, 0.072897, 0, 0.508411), # tolerance = 0.1 # ) # }) # } if (require("BayesFactor", quietly = TRUE)) { mods <- regressionBF(mpg ~ am + cyl, mtcars, progress = FALSE) rx <- suppressMessages(rope(mods, verbose = FALSE)) expect_equal(rx$ROPE_high, -rx$ROPE_low, tolerance = 0.01) expect_equal(rx$ROPE_high[1], 0.6026948, tolerance = 0.01) } bayestestR/tests/testthat/test-estimate_density.R0000644000175000017500000000130614006425552022210 0ustar nileshnileshif (require("logspline") && require("KernSmooth") && require("mclust")) { test_that("estimate_density", { set.seed(333) x <- distribution_normal(500, 1) # Methods density_kernel <- estimate_density(x, method = "kernel") density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") expect_equal(mean(density_kernel$y - density_logspline$y), 0, tolerance = 0.1) expect_equal(mean(density_kernel$y - density_KernSmooth$y), 0, tolerance = 0.1) expect_equal(mean(density_kernel$y - density_mixture$y), 0, tolerance = 0.1) }) } bayestestR/tests/testthat/test-rstanarm.R0000644000175000017500000001061614030221266020462 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("rstanarm") && suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("httr") && require("insight")) { test_that("rstanarm", { skip_on_cran() set.seed(333) model <- insight::download_model("stanreg_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance = 0.1) model <- insight::download_model("stanreg_meanfield_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance = 0.1) model <- insight::download_model("stanreg_fullrank_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance = 0.1) model <- insight::download_model("stanreg_lmerMod_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.097, tolerance = 0.1) model <- insight::download_model("stanreg_glm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.18, tolerance = 0.1) model <- insight::download_model("stanreg_merMod_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.18, tolerance = 0.1) model <- insight::download_model("stanreg_gamm4_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.043, tolerance = 0.1) model <- insight::download_model("stanreg_gam_1") params <- describe_posterior(model, centrality = "all", test = "all", dispersion = TRUE) expect_equal(c(nrow(params), ncol(params)), c(4, 22)) expect_s3_class(hdi(model), "data.frame") expect_s3_class(ci(model), "data.frame") expect_s3_class(rope(model, verbose = FALSE), "data.frame") # 
expect_true("equivalence_test" %in% class(equivalence_test(model))) expect_s3_class(map_estimate(model), "data.frame") expect_s3_class(p_map(model), "data.frame") expect_s3_class(p_direction(model), "data.frame") # expect_error(equivalence_test(model, range = c(.1, .3, .5))) # print(equivalence_test(model, ci = c(.1, .3, .5))) }) test_that("rstanarm", { skip_on_cran() set.seed(333) model <- insight::download_model("stanreg_glm_3") out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean") s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s[1:4, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[1:4, 8, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() set.seed(333) model <- insight::download_model("stanreg_merMod_3") out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean") s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Effects", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s[1:8, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[1:8, 8, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() set.seed(333) model <- insight::download_model("stanmvreg_1") out <- describe_posterior(model, effects = "fixed", components = "all", centrality = "mean", test = NULL) s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Response", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS" )) expect_equal(as.vector(s[c(1:2, 5:7), 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[c(1:2, 5:7), 10, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() set.seed(333) model <- insight::download_model("stanmvreg_1") out <- describe_posterior(model, effects = "fixed", components = "all", centrality = "mean", test = NULL, priors = TRUE) expect_identical(colnames(out), c( "Parameter", "Response", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS", "Prior_Distribution", "Prior_Location", "Prior_Scale" )) expect_equal(nrow(out), 5) }) } } bayestestR/tests/testthat/test-blavaan.R0000644000175000017500000000604714133140641020243 0ustar nileshnileshif (suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("testthat")) { test_that("blavaan, all", { skip_on_cran() skip_if_not_installed("blavaan") skip_if_not_installed("lavaan") skip_if_not_installed("cmdstanr") require(blavaan) data("PoliticalDemocracy", package = "lavaan") model <- " # latent variable definitions dem60 =~ y1 + a*y2 dem65 =~ y5 + a*y6 # regressions dem65 ~ dem60 # residual correlations y1 ~~ y5 " model2 <- " # latent variable definitions dem60 =~ y1 + a*y2 dem65 =~ y5 + a*y6 # regressions dem65 ~ 0*dem60 # residual correlations y1 ~~ 0*y5 " suppressWarnings(capture.output({ bfit <- blavaan::bsem(model, data = PoliticalDemocracy, n.chains = 1, burnin = 50, sample = 100 ) bfit2 <- blavaan::bsem(model2, data = PoliticalDemocracy, n.chains = 1, burnin = 50, sample = 100 ) })) x <- point_estimate(bfit, centrality = "all", dispersion = TRUE) expect_true(all(c("Median", "MAD", "Mean", "SD", "MAP", "Component") %in% colnames(x))) expect_equal(nrow(x), 14) x <- eti(bfit) expect_equal(nrow(x), 14) x <- hdi(bfit) expect_equal(nrow(x), 14) x <- 
p_direction(bfit) expect_equal(nrow(x), 14) x <- rope(bfit, range = c(-.1, .1)) expect_equal(nrow(x), 14) x <- p_rope(bfit, range = c(-.1, .1)) expect_equal(nrow(x), 14) x <- p_map(bfit) expect_equal(nrow(x), 14) x <- p_significance(bfit, threshold = c(-.1, .1)) expect_equal(nrow(x), 14) x <- equivalence_test(bfit, range = c(-.1, .1)) expect_equal(nrow(x), 14) x <- estimate_density(bfit) expect_equal(length(unique(x$Parameter)), 14) ## Bayes factors ---- expect_warning(bayesfactor_models(bfit, bfit2)) x <- suppressWarnings(bayesfactor_models(bfit, bfit2)) expect_true(x$log_BF[2] < 0) expect_warning(weighted_posteriors(bfit, bfit2)) x <- suppressWarnings(weighted_posteriors(bfit, bfit2)) expect_equal(ncol(x), 14) # bfit_prior <- unupdate(bfit) # capture.output(x <- bayesfactor_parameters(bfit, prior = bfit_prior)) # expect_equal(nrow(x), 14) # # x <- expect_warning(si(bfit, prior = bfit_prior)) # expect_equal(nrow(x), 14) # # ## Prior/posterior checks ---- # suppressWarnings(x <- check_prior(bfit)) # expect_equal(nrow(x), 13) # # x <- check_prior(bfit, simulate_priors = FALSE) # expect_equal(nrow(x), 14) x <- diagnostic_posterior(bfit) expect_equal(nrow(x), 14) x <- simulate_prior(bfit) expect_equal(ncol(x), 13) # YES this is 13! We have two parameters with the same prior. x <- describe_prior(bfit) expect_equal(nrow(x), 13) # YES this is 13! We have two parameters with the same prior. # x <- describe_posterior(bfit, test = "all", rope_range = c(-.1, .1)) # expect_equal(nrow(x), 14) }) } bayestestR/tests/testthat/test-p_significance.R0000644000175000017500000000217714054317545021613 0ustar nileshnileshif (suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("rstanarm", quietly = TRUE) && require("testthat", quietly = TRUE)) { test_that("p_significance", { # numeric set.seed(333) x <- bayestestR::distribution_normal(10000, 1, 1) ps <- bayestestR::p_significance(x) expect_equal(as.numeric(ps), 0.816, tolerance = 0.1) expect_equal(nrow(p_significance(data.frame(replicate(4, rnorm(100))))), 4) expect_s3_class(ps, "p_significance") expect_equal(tail(capture.output(print(ps)), 1), "Practical Significance (threshold: 0.10): 0.82") }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("insight")) { # stanreg m <- insight::download_model("stanreg_merMod_5") expect_equal( p_significance(m, effects = "all")$ps[1], 0.99, tolerance = 1e-2 ) # brms m2 <- insight::download_model("brms_1") expect_equal( p_significance(m2, effects = "all")$ps, c(1.0000, 0.9985, 0.9785), tolerance = 0.01 ) } } } bayestestR/tests/testthat/test-distributions.R0000644000175000017500000000274614006425552021551 0ustar nileshnileshtest_that("distributions", { expect_equal(mean(distribution_normal(10)), 0, tolerance = 0.01) expect_equal(length(distribution_normal(10, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_beta(10, 1, 1)), 0.5, tolerance = 0.01) expect_equal(length(distribution_normal(10, 1, 1, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_binomial(10, 0, 0.5)), 0, tolerance = 0.01) expect_equal(length(distribution_binomial(10, 0, 0.5, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_cauchy(10)), 0, tolerance = 0.01) expect_equal(length(distribution_cauchy(10, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_chisquared(10, 1)), 0.778, tolerance = 0.01) expect_equal(length(distribution_chisquared(10, 1, random = TRUE)), 10, tolerance = 0.01) 
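# These mean/length checks rely on distribution_*() being deterministic:
# unless random = TRUE, the functions return a fixed quantile grid rather
# than random draws, which is why no seed is needed here. A commented sketch
# of that behaviour (the exact grid is an implementation detail of
# bayestestR, so the ppoints() form below is an assumption):
#
#   x <- distribution_normal(10)
#   all.equal(x, qnorm(ppoints(10)))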
expect_equal(mean(distribution_gamma(10, 1)), 0.874, tolerance = 0.01) expect_equal(length(distribution_gamma(10, 1, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_poisson(10)), 0.8, tolerance = 0.01) expect_equal(length(distribution_poisson(10, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_student(10, 1)), 0, tolerance = 0.01) expect_equal(length(distribution_student(10, 1, random = TRUE)), 10, tolerance = 0.01) expect_equal(mean(distribution_uniform(10)), 0.5, tolerance = 0.01) expect_equal(length(distribution_uniform(10, random = TRUE)), 10, tolerance = 0.01) }) bayestestR/tests/testthat/test-different_models.R0000644000175000017500000000446114034712166022155 0ustar nileshnileshosx <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (!osx && require("rstanarm", quietly = TRUE)) { test_that("insight::get_predicted", { x <- insight::get_predicted(rstanarm::stan_glm(hp ~ mpg, data = mtcars, iter = 500, refresh = 0)) rez <- point_estimate(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 4)) rez <- hdi(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 4)) rez <- eti(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 4)) rez <- ci(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 4)) rez <- map_estimate(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 2)) rez <- p_direction(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 2)) # rez <- p_map(x) # expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) # # rez <- p_significance(x) # expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) # # rez <- rope(x) # expect_equal(c(nrow(rez), ncol(rez)), c(2, 5)) rez <- describe_posterior(x) expect_equal(c(nrow(rez), ncol(rez)), c(32, 5)) # rez <- estimate_density(x) # expect_equal(c(nrow(rez), ncol(rez)), c(2048, 3)) }) } if (!osx && require("bayesQR", quietly = TRUE)) { test_that("bayesQR", { x <- bayesQR::bayesQR(Sepal.Length ~ Petal.Width, data = iris, quantile = 0.1, alasso = TRUE, ndraw = 500) rez <- p_direction(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) rez <- p_map(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) rez <- p_significance(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) rez <- rope(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 5)) rez <- hdi(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 4)) rez <- eti(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 4)) rez <- map_estimate(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 2)) rez <- point_estimate(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 4)) rez <- describe_posterior(x) expect_equal(c(nrow(rez), ncol(rez)), c(2, 10)) rez <- estimate_density(x) expect_equal(c(nrow(rez), ncol(rez)), c(2048, 3)) }) } bayestestR/tests/testthat/test-p_map.R0000644000175000017500000000224014006425552017730 0ustar nileshnileshif (requireNamespace("rstanarm", quietly = TRUE)) { test_that("p_map", { expect_equal(as.numeric(p_map(distribution_normal(1000))), 1, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 1, 1))), 0.62, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 2, 1))), 0.15, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 3, 0.01))), 0, tolerance = 0.1) }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("p_map", { expect_equal( p_map(m, effects = "all")$p_MAP, 
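# p_MAP (the MAP-based p-value) is the posterior density at the null value
# divided by the density at the MAP estimate, so values near 0 mean the null
# sits far out in the tails. This check only asserts that the model method
# and the raw-draws method agree. Kernel-density sketch of the quantity
# (commented; 'draws' is a hypothetical numeric vector of posterior draws):
#
#   d <- density(draws)
#   approx(d$x, d$y, xout = 0)$y / max(d$y)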
p_map(p)$p_MAP, tolerance = 0.1 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("p_map", { expect_equal( p_map(m, effects = "all", component = "all")$p_MAP, p_map(p)$p_MAP, tolerance = 0.1 ) }) } } } bayestestR/tests/testthat/test-si.R0000644000175000017500000000342314054317545017260 0ustar nileshnileshif (require("rstanarm") && suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("testthat") && require("emmeans")) { test_that("si.numeric", { set.seed(333) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) res <- si(posterior, prior) expect_equal(res$CI_low, 0.03999124, tolerance = 0.02) expect_equal(res$CI_high, 1.053103, tolerance = 0.02) expect_s3_class(res, c("bayestestR_si")) res <- si(posterior, prior, BF = 3) expect_equal(res$CI_low, 0.333, tolerance = 0.02) expect_equal(res$CI_high, 0.759, tolerance = 0.02) res <- si(posterior, prior, BF = 100) expect_true(all(is.na(res$CI_low))) expect_true(all(is.na(res$CI_high))) res <- si(posterior, prior, BF = c(1 / 3, 1, 3)) expect_equal(res$CI, c(1 / 3, 1, 3), tolerance = 0.02) expect_equal(res$CI_low, c(-0.119, 0.039, 0.333), tolerance = 0.02) expect_equal(res$CI_high, c(1.213, 1.053, 0.759), tolerance = 0.02) }) test_that("si.rstanarm", { skip_on_cran() data(sleep) contrasts(sleep$group) <- contr.orthonorm # See vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep, refresh = 0) set.seed(333) stan_model_p <- update(stan_model, prior_PD = TRUE) res1 <- si(stan_model, stan_model_p, verbose = FALSE) set.seed(333) res2 <- si(stan_model, verbose = FALSE) expect_s3_class(res1, c("bayestestR_si")) expect_equal(res1, res2) set.seed(123) group_diff <- pairs(emmeans(stan_model, ~group)) res3 <- si(group_diff, prior = stan_model) expect_equal(res3$CI_low, -2.746, tolerance = 0.3) expect_equal(res3$CI_high, -0.4, tolerance = 0.3) }) } bayestestR/tests/testthat/test-bayesfactor_parameters.R0000644000175000017500000000666014133140641023365 0ustar nileshnileshif (require("rstanarm") && require("BayesFactor") && require("testthat") && require("insight") && require("httr") && require("brms")) { # bayesfactor_parameters data frame --------------------------------------- test_that("bayesfactor_parameters data frame", { Xprior <- data.frame( x = distribution_normal(1e4), y = distribution_normal(1e4) ) Xposterior <- data.frame( x = distribution_normal(1e4, mean = 0.5), y = distribution_normal(1e4, mean = -0.5) ) # point bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 0) expect_equal(bfsd$log_BF, c(0.12, 0.12), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 1) expect_equal(bfsd$log_BF, c(0.44, -0.35), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = -1) expect_equal(bfsd$log_BF, c(-0.35, 0.44), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0.5, direction = 0) expect_equal(bfsd$log_BF, c(-0.12, 0.37), tolerance = 0.1) expect_warning(bfsd <- bayestestR::bayesfactor_parameters(Xposterior)) expect_equal(bfsd$log_BF, c(0, 0), tolerance = 0.1) # interval bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = 0) expect_equal(bfsd$log_BF, c(0.13, 0.13), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = 1) expect_equal(bfsd$log_BF, 
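# These point-null Bayes factors are Savage-Dickey density ratios,
# BF10 = prior density at the null / posterior density at the null,
# reported on the log scale (log_BF). A one-parameter sketch (commented;
# logspline is among the package's dependencies, but its use as the internal
# density estimator here is an assumption):
#
#   f_post <- logspline::logspline(Xposterior$x)
#   f_prior <- logspline::logspline(Xprior$x)
#   log(logspline::dlogspline(0, f_prior) / logspline::dlogspline(0, f_post))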
c(0.47, -0.39), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = -1) expect_equal(bfsd$log_BF, c(-0.39, 0.47), tolerance = 0.1) # interval with inf bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, Inf)) expect_equal(bfsd$log_BF, c(-0.81, 0.80), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-Inf, .1)) expect_equal(bfsd$log_BF, c(0.80, -0.81), tolerance = 0.1) }) # bayesfactor_parameters RSTANARM ----------------------------------------- test_that("bayesfactor_parameters RSTANARM", { skip_on_cran() fit <- stan_glm(mpg ~ ., data = mtcars, refresh = 0) set.seed(333) fit_p <- unupdate(fit) BF2 <- bayesfactor_parameters(fit, fit_p) set.seed(333) BF1 <- bayesfactor_parameters(fit) expect_equal(BF1, BF2) model_flat <- stan_glm(extra ~ group, data = sleep, prior = NULL, refresh = 0) expect_error(bayesfactor_parameters(model_flat)) }) # bayesfactor_parameters BRMS --------------------------------------------- # .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" # if (.runThisTest) { # test_that("bayesfactor_parameters BRMS", { # skip_on_cran() # # brms_mixed_6 <- insight::download_model("brms_mixed_6") # # set.seed(222) # brms_mixed_6_p <- unupdate(brms_mixed_6) # bfsd1 <- bayesfactor_parameters(brms_mixed_6, brms_mixed_6_p, effects = "fixed") # # set.seed(222) # bfsd2 <- bayesfactor_parameters(brms_mixed_6, effects = "fixed") # # expect_equal(log(bfsd1$BF), log(bfsd2$BF), tolerance = .11) # # # brms_mixed_1 <- insight::download_model("brms_mixed_1") # expect_error(bayesfactor_parameters(brms_mixed_1)) # }) # } } bayestestR/tests/testthat/test-p_direction.R0000644000175000017500000000266014023526535021143 0ustar nileshnileshif (require("testthat", quietly = TRUE) && suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("rstanarm", quietly = TRUE) && require("brms", quietly = TRUE)) { test_that("p_direction", { set.seed(333) x <- bayestestR::distribution_normal(10000, 1, 1) pd <- bayestestR::p_direction(x) expect_equal(as.numeric(pd), 0.842, tolerance = 0.1) expect_equal(as.numeric(p_direction(x, method = "kernel")), 0.842, tolerance = 0.1) expect_equal(nrow(p_direction(data.frame(replicate(4, rnorm(100))))), 4) expect_s3_class(pd, "p_direction") expect_equal(tail(capture.output(print(pd)), 1), "Probability of Direction: 0.84") }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("p_direction", { expect_equal( p_direction(m, effects = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("p_direction", { expect_equal( p_direction(m, effects = "all", component = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) }) } } } bayestestR/tests/testthat/test-overlap.R0000644000175000017500000000030514006425552020304 0ustar nileshnileshtest_that("overlap", { set.seed(333) x <- distribution_normal(1000, 2, 0.5) y <- distribution_normal(1000, 0, 1) expect_equal(as.numeric(overlap(x, y)), 0.185, tolerance = 0.01) }) bayestestR/tests/testthat/test-rope_range.R0000644000175000017500000000150214133230367020754 0ustar nileshnileshtest_that("rope_range", { x <- cor.test(ToothGrowth$len, ToothGrowth$dose) expect_equal( rope_range(x), c(-0.05, 0.05) ) }) .runThisTest <- 
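# rope_range() follows Kruschke's (2018) conventions: about +/- 0.05 for
# correlations (asserted above) and +/- 0.1 * sd(y), i.e. half of a "small"
# standardized effect, for linear models. Hand-rolled linear-model version
# (commented; illustrative only):
#
#   fit <- lm(mpg ~ wt, data = mtcars)
#   c(-0.1, 0.1) * sd(mtcars$mpg)   # should match rope_range(fit)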
Sys.getenv("RunAllbayestestRTests") == "yes" # if (.runThisTest && require("brms", quietly = TRUE)) { # test_that("rope_range", { # model <- brm(mpg ~ wt + gear, data = mtcars, iter = 300) # # expect_equal( # rope_range(model), # c(-0.6026948, 0.6026948), # tolerance = 0.01 # ) # }) # # test_that("rope_range (multivariate)", { # model <- brm(mvbind(mpg, disp) ~ wt + gear, data = mtcars, iter = 300) # # expect_equal( # rope_range(model), # list( # mpg = c(-0.602694, 0.602694), # disp = c(-12.393869, 12.393869) # ), # tolerance = 0.01 # ) # }) # } bayestestR/tests/testthat/test-describe_posterior.R0000644000175000017500000004216514133726427022542 0ustar nileshnileshif (require("testthat") && suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("rstanarm") && require("brms") && require("httr") && require("insight") && require("BayesFactor", quietly = TRUE)) { test_that("describe_posterior", { set.seed(333) # numeric ------------------------------------------------- x <- distribution_normal(1000) expect_warning(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = 0.89 )) rez <- as.data.frame(suppressWarnings(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = 0.89 ))) expect_equal(dim(rez), c(1, 19)) expect_equal(colnames(rez), c( "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "p_map", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "log_BF" )) expect_warning(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) )) # rez <- suppressWarnings(describe_posterior( # x, # centrality = "all", # dispersion = TRUE, # test = "all", # ci = c(0.8, 0.9) # )) # expect_equal(dim(rez), c(2, 19)) rez <- describe_posterior( x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile" ) expect_equal(dim(rez), c(1, 4)) # dataframes ------------------------------------------------- x <- data.frame(replicate(4, rnorm(100))) expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all")) # rez <- suppressWarnings(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all")) # expect_equal(dim(rez), c(4, 19)) expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9))) # rez <- suppressWarnings(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9))) # expect_equal(dim(rez), c(8, 19)) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile") expect_equal(dim(rez), c(4, 4)) }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest && Sys.info()["sysname"] != "Darwin") { test_that("describe_posterior", { set.seed(333) # Rstanarm x <- rstanarm::stan_glm(mpg ~ wt, data = mtcars, refresh = 0, iter = 500) rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") expect_equal(dim(rez), c(2, 21)) expect_equal(colnames(rez), c( "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "p_MAP", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "log_BF", "Rhat", "ESS" )) rez <- describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) ) expect_equal(dim(rez), c(4, 21)) rez <- describe_posterior( x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", diagnostic = NULL, 
priors = FALSE ) expect_equal(dim(rez), c(2, 4)) # brms ------------------------------------------------- # x <- brms::brm(mpg ~ wt + (1 | cyl) + (1 + wt | gear), data = mtcars, refresh = 0) # rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, ci = c(0.8, 0.9)) # # expect_equal(dim(rez), c(4, 16)) # expect_equal(colnames(rez), c( # "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", # "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", # "Rhat", "ESS" # )) # # rez <- describe_posterior( # x, # centrality = NULL, # dispersion = TRUE, # test = NULL, # ci_method = "quantile", # diagnostic = NULL # ) # # expect_equal(dim(rez), c(2, 4)) # # model <- brms::brm( # mpg ~ drat, # data = mtcars, # chains = 2, # algorithm = "meanfield", # refresh = 0 # ) # # expect_equal(nrow(describe_posterior(model)), 2) # rstanarm ------------------------------------------------- model <- rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "meanfield", refresh = 0 ) expect_equal(nrow(describe_posterior(model)), 2) model <- rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "optimizing", refresh = 0 ) expect_equal(nrow(describe_posterior(model)), 2) model <- rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "fullrank", refresh = 0 ) expect_equal(nrow(describe_posterior(model)), 2) # model <- brms::brm(mpg ~ drat, data = mtcars, chains=2, algorithm="fullrank", refresh=0) # expect_equal(nrow(describe_posterior(model)), 2) # BayesFactor # library(BayesFactor) # x <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) # rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") # expect_equal(dim(rez), c(4, 16)) # rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9)) # expect_equal(dim(rez), c(8, 16)) # rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method="quantile") # expect_equal(dim(rez), c(4, 4)) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("describe_posterior", { expect_equal( describe_posterior(m, effects = "all")$Median, describe_posterior(p)$Median, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("describe_posterior", { expect_equal( describe_posterior(m, effects = "all", component = "all")$Median, describe_posterior(p)$Median, tolerance = 1e-3 ) }) } test_that("describe_posterior w/ BF+SI", { skip_on_cran() x <- insight::download_model("stanreg_lm_1") set.seed(555) rez <- describe_posterior(x, ci_method = "SI", test = "bf") # test si set.seed(555) rez_si <- si(x) expect_equal(rez$CI_low, rez_si$CI_low, tolerance = 0.1) expect_equal(rez$CI_high, rez_si$CI_high, tolerance = 0.1) # test BF set.seed(555) rez_bf <- bayesfactor_parameters(x) expect_equal(rez$BF, rez_bf$BF, tolerance = 0.1) }) # BayesFactor ------------------------------------------------- if (getRversion() >= "4.0") { set.seed(123) expect_equal( as.data.frame(describe_posterior(correlationBF( mtcars$wt, mtcars$mpg, rscale = 0.5 ))), structure( list( Parameter = "rho", Median = -0.833281858269296, CI = 0.95, CI_low = -0.919418102114416, CI_high = -0.715602277241063, pd = 1, ROPE_CI = 0.95, ROPE_low = -0.05, ROPE_high = 0.05, ROPE_Percentage = 0, log_BF = 17.328704623688, BF = 33555274.5519413, Prior_Distribution = "beta", Prior_Location = 2, Prior_Scale = 2 ), row.names = 1L, class = "data.frame", 
ci_method = "hdi" ), tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_equal( describe_posterior(ttestBF(mtcars$wt, mu = 3), ci = 0.95), structure(list(Parameter = "Difference", Median = -0.198578438156886, CI = 0.95, CI_low = -0.535759904384745, CI_high = 0.1557581, pd = 0.858, ROPE_CI = 0.95, ROPE_low = -0.0978457442989697, ROPE_high = 0.0978457442989697, ROPE_Percentage = 0.246250986582478, log_BF = -0.949713514141272, BF = 0.386851835160946, Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.707106781186548), row.names = 1L, class = c("describe_posterior", "see_describe_posterior", "data.frame"), ci_method = "hdi", object_name = "ttestBF(mtcars$wt, mu = 3)"), tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_equal( describe_posterior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "poisson" ), ci = 0.95), structure( list( Parameter = c( "cell[1,1]", "cell[2,1]", "cell[1,2]", "cell[2,2]", "cell[1,3]", "cell[2,3]", "Ratio" ), Median = c( 3.04620767622137, 7.33170140780154, 3.96252503900368, 3.06206636495483, 10.7088156207511, 2.26008072419983, NA ), CI = c(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA), CI_low = c( 0.537476720942068, 3.33553818106395, 1.05013765177975, 0.746538992318074, 5.49894434136364, 0.275642629940081, NA ), CI_high = c( 6.62852027141624, 12.6753970192515, 7.74693313388489, 6.87239730676778, 16.9198964674968, 5.4533083861175, NA ), pd = c(1, 1, 1, 1, 1, 1, NA), ROPE_CI = c(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA), ROPE_low = c(-0.1, -0.1, -0.1, -0.1, -0.1, -0.1, NA), ROPE_high = c(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, NA), ROPE_Percentage = c(0, 0, 0, 0, 0, 0, NA), log_BF = c( 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, NA ), BF = c( 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, NA ), Prior_Distribution = c(NA, NA, NA, NA, NA, NA, "poisson"), Prior_Location = c(NA, NA, NA, NA, NA, NA, 0), Prior_Scale = c(NA, NA, NA, NA, NA, NA, 1) ), row.names = c(1L, 4L, 2L, 5L, 3L, 6L, 7L), class = c("describe_posterior", "see_describe_posterior") ), tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_equal( describe_posterior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "indepMulti", fixedMargin = "cols", priorConcentration = 1.6 ), ci = 0.95), structure( list( Parameter = c( "cell[1,1]", "cell[2,1]", "cell[1,2]", "cell[2,2]", "cell[1,3]", "cell[2,3]", "Ratio" ), Median = c( 3.33359102240953, 7.27094924961528, 4.13335763121549, 3.36172537199681, 10.3872621523407, 2.56061336771352, NA ), CI = c(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA), CI_low = c( 0.912122089726423, 3.51744611674693, 1.39218072401004, 0.923175932880601, 6.18021898129278, 0.465587711080369, NA ), CI_high = c( 6.61128887457661, 11.4058892728414, 7.61378018576518, 6.65522159416386, 15.1209075845299, 5.35853420162441, NA ), pd = c(1, 1, 1, 1, 1, 1, NA), ROPE_CI = c(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA), ROPE_low = c(-0.1, -0.1, -0.1, -0.1, -0.1, -0.1, NA), ROPE_high = c(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, NA), ROPE_Percentage = c(0, 0, 0, 0, 0, 0, NA), log_BF = c( 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, NA ), BF = c( 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, NA ), Prior_Distribution = c(NA, NA, NA, NA, NA, NA, "independent multinomial"), Prior_Location = c(NA, NA, NA, NA, NA, NA, 0), Prior_Scale = c(NA, 
NA, NA, NA, NA, NA, 1.6) ), row.names = c(1L, 4L, 2L, 5L, 3L, 6L, 7L), class = c("describe_posterior", "see_describe_posterior", "data.frame"), ci_method = "hdi", object_name = "contingencyTableBF(x = table(mtcars$am, mtcars$cyl), sampleType = \"indepMulti\", fixedMargin = \"cols\", priorConcentration = 1.6)" ) , tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_equal( describe_posterior(anovaBF(extra ~ group, data = sleep, progress = FALSE), ci = 0.95), structure( list( Parameter = c("mu", "group-1", "group-2", "sig2", "g_group"), Median = c( 1.53667371296145, -0.571674439385088, 0.571674439385088, 3.69268743002151, 0.349038661644431 ), CI = c(0.95, 0.95, 0.95, 0.95, 0.95), CI_low = c( 0.691696017646264, -1.31604531656452, -0.229408603643392, 1.75779899540302, 0.0192738130412634 ), CI_high = c( 2.43317955922589, 0.229408603643392, 1.31604531656452, 6.88471056133351, 5.30402785651874 ), pd = c(0.99975, 0.927, 0.927, 1, 1), ROPE_CI = c(0.95, 0.95, 0.95, 0.95, 0.95), ROPE_low = c( -0.201791972090071, -0.201791972090071, -0.201791972090071, -0.201791972090071, -0.201791972090071 ), ROPE_high = c( 0.201791972090071, 0.201791972090071, 0.201791972090071, 0.201791972090071, 0.201791972090071 ), ROPE_Percentage = c(0, 0.162325703762168, 0.162325703762168, 0, 0.346487766377269), log_BF = c( 0.235803198474248, 0.235803198474248, 0.235803198474248, 0.235803198474248, 0.235803198474248 ), BF = c( 1.26592514964916, 1.26592514964916, 1.26592514964916, 1.26592514964916, 1.26592514964916 ), Prior_Distribution = c(NA, "cauchy", "cauchy", NA, NA), Prior_Location = c(NA, 0, 0, NA, NA), Prior_Scale = c(NA, 0.5, 0.5, NA, NA) ), row.names = c(4L, 2L, 3L, 5L, 1L), class = c("describe_posterior", "see_describe_posterior", "data.frame"), ci_method = "hdi", object_name = "anovaBF(extra ~ group, data = sleep, progress = FALSE)" ), tolerance = 0.1, ignore_attr = TRUE ) } } } bayestestR/tests/testthat/test-bayesfactor_models.R0000644000175000017500000001137314054317545022515 0ustar nileshnileshif (suppressPackageStartupMessages(require("bayestestR", quietly = TRUE)) && require("testthat")) { # bayesfactor_models BIC -------------------------------------------------- test_that("bayesfactor_models BIC", { skip_if_not_installed("lme4") set.seed(444) mo1 <- lme4::lmer(Sepal.Length ~ (1 | Species), data = iris) mo2 <- lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) mo3 <- lme4::lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) mo4 <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) mo5 <- lme4::lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) mo4_e <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris[-1, ]) # both uses of denominator BFM1 <<- bayesfactor_models(mo2, mo3, mo4, mo1, denominator = 4) BFM2 <- bayesfactor_models(mo2, mo3, mo4, denominator = mo1) BFM3 <- bayesfactor_models(mo2, mo3, mo4, mo1, denominator = mo1) BFM4 <<- bayesfactor_models(mo2, mo3, mo4, mo5, mo1, denominator = mo1) expect_equal(BFM1, BFM2) expect_equal(BFM1, BFM3) expect_equal(BFM1, bayestestR::bayesfactor_models(list(mo2 = mo2, mo3 = mo3, mo4 = mo4, mo1 = mo1), denominator = 4)) # only on same data! 
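# For frequentist models, bayesfactor_models() works through the BIC
# approximation BF10 = exp((BIC0 - BIC1) / 2) (Wagenmakers, 2007), which is
# only meaningful when every model was fitted to the *same* data; that is
# why mo4_e, fitted without the first row, must raise an error below.
# Commented sketch using the package's bic_to_bf():
#
#   bic <- c(BIC(mo1), BIC(mo2))
#   bic_to_bf(bic, denominator = bic[1])   # BFs relative to mo1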
expect_error(bayestestR::bayesfactor_models(mo1, mo2, mo4_e)) # update models expect_equal(update(BFM2, subset = c(1, 2))$log_BF, c(1, 57.3, 54.52), tolerance = 0.1) # update reference expect_equal(update(BFM2, reference = 1)$log_BF, c(0, -2.8, -6.2, -57.4), tolerance = 0.1 ) }) test_that("bayesfactor_models BIC (unsupported / diff nobs)", { skip_on_cran() set.seed(444) fit1 <- lm(Sepal.Length ~ Sepal.Width + Petal.Length, iris) fit2a <- lm(Sepal.Length ~ Sepal.Width, iris[-1, ]) # different number of objects fit2b <- lm(Sepal.Length ~ Sepal.Width, iris) # not supported class(fit2b) <- "NOTLM" logLik.NOTLM <<- function(...) { stats:::logLik.lm(...) } # Should fail expect_error(bayesfactor_models(fit1, fit2a)) # Should warn, but still work res <- bayesfactor_models(fit1, fit2b) expect_equal(res$log_BF, c(0, -133.97), tolerance = 0.1) }) # bayesfactor_models STAN --------------------------------------------- test_that("bayesfactor_models STAN", { skip_on_cran() skip_if_not_installed("rstanarm") skip_if_not_installed("bridgesampling") set.seed(333) stan_bf_0 <- rstanarm::stan_glm( Sepal.Length ~ 1, data = iris, refresh = 0, iter = 500, diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_bf_1 <- rstanarm::stan_glm( Sepal.Length ~ Species, data = iris, refresh = 0, iter = 500, diagnostic_file = file.path(tempdir(), "df1.csv") ) set.seed(333) # compare against bridgesampling bridge_BF <- bridgesampling::bayes_factor( bridgesampling::bridge_sampler(stan_bf_1), bridgesampling::bridge_sampler(stan_bf_0) ) set.seed(333) expect_warning(stan_models <- bayesfactor_models(stan_bf_0, stan_bf_1)) expect_s3_class(stan_models, "bayesfactor_models") expect_equal(length(stan_models$log_BF), 2) expect_equal(stan_models$log_BF[2], log(bridge_BF$bf), tolerance = 0.1) }) # bayesfactor_inclusion --------------------------------------------------- test_that("bayesfactor_inclusion | BayesFactor", { set.seed(444) skip_if_not_installed("BayesFactor") # BayesFactor ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- BayesFactor::anovaBF(len ~ dose * supp, ToothGrowth) expect_equal( bayesfactor_inclusion(BF_ToothGrowth), bayesfactor_inclusion(bayesfactor_models(BF_ToothGrowth)) ) }) test_that("bayesfactor_inclusion | LMM", { # with random effects in all models: skip_if_not_installed("lme4") expect_true(is.nan(bayesfactor_inclusion(BFM1)["1:Species", "log_BF"])) bfinc_all <- bayesfactor_inclusion(BFM4, match_models = FALSE) expect_equal(bfinc_all$p_prior, c(1, 0.8, 0.6, 0.4, 0.2), tolerance = 0.1) expect_equal(bfinc_all$p_posterior, c(1, 1, 0.06, 0.01, 0), tolerance = 0.1) expect_equal(bfinc_all$log_BF, c(NaN, 56.04, -3.22, -5.9, -8.21), tolerance = 0.1) # + match_models bfinc_matched <- bayesfactor_inclusion(BFM4, match_models = TRUE) expect_equal(bfinc_matched$p_prior, c(1, 0.2, 0.6, 0.2, 0.2), tolerance = 0.1) expect_equal(bfinc_matched$p_posterior, c(1, 0.94, 0.06, 0.01, 0), tolerance = 0.1) expect_equal(bfinc_matched$log_BF, c(NaN, 57.37, -3.92, -5.25, -3.25), tolerance = 0.1) }) } bayestestR/tests/testthat/test-bayesfactor_restricted.R0000644000175000017500000000326414054317545023402 0ustar nileshnileshif (require("testthat")) { # bayesfactor_restricted data.frame --------------------------------------- test_that("bayesfactor_restricted df", { prior <- data.frame( X = distribution_normal(100), X1 = c(distribution_normal(50), distribution_normal(50)), X3 = c(distribution_normal(80), distribution_normal(20)) ) posterior <- data.frame( X = distribution_normal(100, .4, .2), X1 = 
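# For order restrictions the Bayes factor reduces to a ratio of
# probabilities, BF = Pr(restriction | posterior) / Pr(restriction | prior),
# which the exp(bfr$log_BF) assertion below verifies directly. By-hand
# version for the first hypothesis (commented; illustrative only):
#
#   num <- mean(posterior$X > posterior$X1 & posterior$X1 > posterior$X3)
#   den <- mean(prior$X > prior$X1 & prior$X1 > prior$X3)
#   num / den   # ~ exp(bfr$log_BF)[1]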
distribution_normal(100, -.2, .2), X3 = distribution_normal(100, .2) ) hyps <- c( "X > X1 & X1 > X3", "X > X1" ) bfr <- bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) expect_equal(bfr$p_prior, c(0.2, 0.5), tolerance = 0.1) expect_equal(bfr$p_posterior, c(0.31, 1), tolerance = 0.1) expect_equal(bfr$log_BF, c(0.43, 0.69), tolerance = 0.1) expect_equal(exp(bfr$log_BF), bfr$p_posterior / bfr$p_prior, tolerance = 0.1) expect_error(bayesfactor_restricted(posterior, prior, hypothesis = "Y < 0")) }) # bayesfactor_restricted RSTANARM ----------------------------------------- test_that("bayesfactor_restricted RSTANARM", { skip_on_cran() skip_if_not_installed("rstanarm") suppressWarnings( fit_stan <- rstanarm::stan_glm(mpg ~ wt + cyl + am, data = mtcars, refresh = 0, iter = 200) ) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) set.seed(444) fit_p <- unupdate(fit_stan) bfr1 <- bayesfactor_restricted(fit_stan, prior = fit_p, hypothesis = hyps) set.seed(444) bfr2 <- bayesfactor_restricted(fit_stan, hypothesis = hyps) expect_equal(bfr1, bfr2) }) } bayestestR/tests/testthat/test-density_at.R0000644000175000017500000000031414006425552020777 0ustar nileshnileshtest_that("density_at", { expect_equal(density_at(distribution_normal(1000), 0), 0.389, tolerance = 0.1) expect_equal(density_at(distribution_normal(1000), c(0, 1))[1], 0.389, tolerance = 0.1) }) bayestestR/tests/testthat/test-effective_sample.R0000644000175000017500000000161714006425552022144 0ustar nileshnileshif (require("rstanarm") && require("brms") && require("insight")) { test_that("effective_sample", { brms_1 <- insight::download_model("brms_1") res <- effective_sample(brms_1) expect_equal( res, data.frame( Parameter = c("b_Intercept", "b_wt", "b_cyl"), ESS = c(5242, 2071, 1951), stringsAsFactors = F ) ) brms_null_1 <- insight::download_model("brms_null_1") res <- effective_sample(brms_null_1) expect_equal( res, data.frame( Parameter = c("b_Intercept"), ESS = c(2888), stringsAsFactors = F ) ) brms_null_2 <- insight::download_model("brms_null_2") res <- effective_sample(brms_null_2) expect_equal( res, data.frame( Parameter = c("b_Intercept"), ESS = c(1059), stringsAsFactors = F ) ) }) } bayestestR/tests/testthat/test-BFBayesFactor.R0000644000175000017500000000566114032727756021272 0ustar nileshnileshif (require("testthat") && require("BayesFactor") && suppressPackageStartupMessages(require("bayestestR", quietly = TRUE))) { set.seed(333) x <- BayesFactor::correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), 0.9225, tolerance = 1) }) # BF t.test one sample --------------------------- data(sleep) diffScores <- sleep$extra[1:10] - sleep$extra[11:20] x <- BayesFactor::ttestBF(x = diffScores) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), 0.99675, tolerance = 1) }) # BF t.test two samples --------------------------- data(chickwts) chickwts <- chickwts[chickwts$feed %in% c("horsebean", "linseed"), ] chickwts$feed <- factor(chickwts$feed) x <- BayesFactor::ttestBF(formula = weight ~ feed, data = chickwts) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), 1, tolerance = 1) }) # BF t.test meta-analytic --------------------------- t <- c(-.15, 2.39, 2.42, 2.43) N <- c(100, 150, 97, 99) x <- BayesFactor::meta.ttestBF(t = t, n1 = N, rscale = 1) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), 0.99975, tolerance = 1) }) # # --------------------------- # # "BF ANOVA" # data(ToothGrowth) # 
ToothGrowth$dose <- factor(ToothGrowth$dose) # levels(ToothGrowth$dose) <- c("Low", "Medium", "High") # x <- BayesFactor::anovaBF(len ~ supp*dose, data=ToothGrowth) # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # --------------------------- # # "BF ANOVA Random" # data(puzzles) # x <- BayesFactor::anovaBF(RT ~ shape*color + ID, data = puzzles, whichRandom="ID") # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # # --------------------------- # # "BF lm" # x <- BayesFactor::lmBF(len ~ supp + dose, data = ToothGrowth) # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # x2 <- BayesFactor::lmBF(len ~ supp + dose + supp:dose, data = ToothGrowth) # x <- x / x2 # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) test_that("rope_range", { x <- BayesFactor::lmBF(len ~ supp + dose, data = ToothGrowth) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10) x <- BayesFactor::ttestBF( ToothGrowth$len[ToothGrowth$supp == "OJ"], ToothGrowth$len[ToothGrowth$supp == "VC"] ) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10) x <- BayesFactor::ttestBF(formula = len ~ supp, data = ToothGrowth) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10) # else x <- BayesFactor::correlationBF(ToothGrowth$len, ToothGrowth$dose) expect_equal(rope_range(x, verbose = FALSE), c(-0.05, 0.05)) }) } bayestestR/tests/testthat/test-map_estimate.R0000644000175000017500000000260114121073153021277 0ustar nileshnileshif (require("testthat") && requireNamespace("rstanarm", quietly = TRUE)) { # numeric ---------------------- test_that("map_estimate", { expect_equal( as.numeric(map_estimate(distribution_normal(1000))), 0, tolerance = 0.01 ) }) if (require("insight") && require("BayesFactor")) { # stanreg ---------------------- m <- insight::download_model("stanreg_merMod_5") test_that("map_estimate", { expect_equal( map_estimate(m, effects = "all")$Parameter, colnames(as.data.frame(m))[1:21] ) }) # brms ---------------------- m <- insight::download_model("brms_zi_3") test_that("map_estimate", { expect_equal( map_estimate(m, effects = "all", component = "all")$Parameter, c( "b_Intercept", "b_child", "b_camper", "r_persons[1,Intercept]", "r_persons[2,Intercept]", "r_persons[3,Intercept]", "r_persons[4,Intercept]", "sd_persons__Intercept", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "r_persons__zi[1,Intercept]", "r_persons__zi[2,Intercept]", "r_persons__zi[3,Intercept]", "r_persons__zi[4,Intercept]", "sd_persons__zi_Intercept" ) ) }) # BayesFactor ------------- m <- correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) expect_error(map_estimate(m)) } } bayestestR/tests/testthat/test-ci.R0000644000175000017500000000320214006425552017226 0ustar nileshnileshif (require("rstanarm") && require("httr") && require("brms") && require("insight") && require("testthat")) { test_that("ci", { expect_equal(ci(distribution_normal(1000), ci = .90)$CI_low[1], -1.6361, tolerance = 0.02) expect_equal(nrow(ci(distribution_normal(1000), ci = c(.80, .90, .95))), 3, tolerance = 0.01) expect_equal(ci(distribution_normal(1000), ci = 1)$CI_low[1], -3.09, tolerance = 0.02) # expect_equal(length(capture.output(print(ci(distribution_normal(1000)))))) # expect_equal(length(capture.output(print(ci(distribution_normal(1000), ci = c(.80, .90)))))) expect_warning(ci(c(2, 3, NA))) expect_warning(ci(c(2, 3))) expect_warning(ci(distribution_normal(1000), ci = 950)) x <- 
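# ci() defaults to the equal-tailed interval (ETI), i.e. the quantiles at
# (1 - ci)/2 and 1 - (1 - ci)/2, which is why the 90% lower bound checked
# above is close to qnorm(.05) = -1.645. Quantile-based equivalent
# (commented; illustrative only):
#
#   quantile(distribution_normal(1000), probs = c(0.05, 0.95))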
data.frame(replicate(4, rnorm(100))) x <- ci(x, ci = c(0.68, 0.89, 0.95)) a <- reshape_ci(x) expect_equal(c(nrow(x), ncol(x)), c(12, 4)) expect_true(all(reshape_ci(a) == x)) }) .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("ci", { expect_equal( ci(m, ci = c(.5, .8), effects = "all")$CI_low, ci(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("rope", { expect_equal( ci(m, ci = c(.5, .8), effects = "all", component = "all")$CI_low, ci(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) } } bayestestR/tests/testthat/test-as.data.frame.density.R0000644000175000017500000000017614006425552022724 0ustar nileshnileshtest_that("as.data.frame.density", { expect_s3_class(as.data.frame(density(distribution_normal(1000))), "data.frame") }) bayestestR/tests/testthat/test-weighted_posteriors.R0000644000175000017500000000524214006425552022732 0ustar nileshnileshif (require("BayesFactor", quietly = TRUE)) { test_that("weighted_posteriors for BayesFactor", { skip_on_cran() set.seed(123) # compute Bayes Factor for 31 different regression models null_den <- regressionBF(mpg ~ cyl + disp + hp + drat + wt, data = mtcars, progress = FALSE ) wBF <- weighted_posteriors(null_den) expect_s3_class(wBF, "data.frame") expect_equal( attr(wBF, "weights")$weights, c( 0, 13, 9, 0, 0, 55, 11, 4, 4, 1246, 6, 2, 38, 4, 946, 12, 3, 3, 209, 3, 491, 174, 4, 134, 7, 293, 1, 123, 35, 92, 51, 27 ) ) }) test_that("weighted_posteriors for BayesFactor (intercept)", { set.seed(123) # fails for win old-release skip_on_cran() skip_on_ci() dat <- data.frame( x1 = rnorm(10), x2 = rnorm(10), y = rnorm(10) ) BFmods <- regressionBF(y ~ x1 + x2, data = dat, progress = FALSE) res <- weighted_posteriors(BFmods) expect_equal(attr(res, "weights")$weights, c(1032, 805, 1388, 775)) wHDI <- hdi(res[c("x1", "x2")], ci = 0.9) expect_equal(wHDI$CI_low, c(-0.519, -0.640), tolerance = 0.01) expect_equal(wHDI$CI_high, c(0.150, 0.059), tolerance = 0.01) }) test_that("weighted_posteriors for nonlinear BayesFactor", { set.seed(123) data(sleep) BFS <- ttestBF( x = sleep$extra[sleep$group == 1], y = sleep$extra[sleep$group == 2], nullInterval = c(-Inf, 0), paired = TRUE ) res <- weighted_posteriors(BFS) expect_equal(attributes(res)$weights$weights, c(113, 3876, 11)) }) } .runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest) { if (require("brms", quietly = TRUE)) { test_that("weighted_posteriors vs posterior_average", { skip_on_cran() fit1 <- brm(rating ~ treat + period + carry, data = inhaler, refresh = 0, save_pars = save_pars(all = TRUE) ) fit2 <- brm(rating ~ period + carry, data = inhaler, refresh = 0, save_pars = save_pars(all = TRUE) ) set.seed(444) res_BT <- weighted_posteriors(fit1, fit2) set.seed(444) res_brms <- brms::posterior_average(fit1, fit2, weights = "bma", missing = 0) res_brms <- res_brms[, 1:4] res_BT1 <- eti(res_BT) res_brms1 <- eti(res_brms) expect_equal(res_BT1$Parameter, res_brms1$Parameter) expect_equal(res_BT1$CI, res_brms1$CI) expect_equal(res_BT1$CI_low, res_brms1$CI_low) expect_equal(res_BT1$CI_high, res_brms1$CI_high) # plot(res_brms1) # plot(res_BT1) }) } } bayestestR/tests/testthat/test-brms.R0000644000175000017500000000647314032344057017612 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest && 
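# The RunAllbayestestRTests environment variable gates the long-running,
# model-downloading tests throughout this suite; tests/testthat.R enables it
# only for development versions (those with a fourth version component). To
# run these tests locally:
#
#   Sys.setenv(RunAllbayestestRTests = "yes")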
require("brms") && require("testthat") && require("insight") && require("httr")) { test_that("brms", { skip_on_cran() set.seed(333) model <- insight::download_model("brms_mixed_1") expect_s3_class(hdi(model), "data.frame") expect_s3_class(ci(model), "data.frame") expect_s3_class(rope(model, verbose = FALSE), "data.frame") # expect_true("equivalence_test" %in% class(equivalence_test(model))) expect_s3_class(map_estimate(model), "data.frame") expect_s3_class(p_map(model), "data.frame") expect_s3_class(p_direction(model), "data.frame") expect_equal(colnames(hdi(model)), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) expect_equal(colnames(hdi(model, effects = "all")), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) # expect_equal(nrow(equivalence_test(model)), 2) out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean") suppressWarnings( s <- summary(model) ) expect_identical(colnames(out), c( "Parameter", "Effects", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean[1:2], tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat[1:2], tolerance = 1e-1) expect_equal(as.vector(s$random$cyl[, 1, drop = TRUE]), out$Mean[12], tolerance = 1e-3) expect_equal(as.vector(s$random$gear[, 1, drop = TRUE]), out$Mean[13:15], tolerance = 1e-3) }) test_that("brms", { # skip_on_travis() skip_on_cran() set.seed(333) model <- insight::download_model("brms_1") out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean") s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean[1:3], tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat[1:3], tolerance = 1e-1) }) test_that("brms", { # skip_on_travis() skip_on_cran() set.seed(333) model <- insight::download_model("brms_mv_2") out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean", test = NULL) s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Effects", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS" )) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean[c(1, 11, 2:5, 12:14)], tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat[c(1, 11, 2:5, 12:14)], tolerance = 1e-1) }) test_that("brms", { skip_on_cran() set.seed(333) model <- insight::download_model("brms_2") out <- describe_posterior(model, effects = "all", components = "all", centrality = "mean", test = NULL) s <- summary(model) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) } bayestestR/tests/testthat/test-describe_prior.R0000644000175000017500000001106214133140641021623 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllbayestestRTests") == "yes" if (.runThisTest && require("testthat") && require("bayestestR") && require("rstanarm") && require("brms") && require("httr") && require("insight") && require("BayesFactor") && packageVersion("insight") > "0.13.2") { test_that("describe_prior", { # Bayes Factor ---------------------------------------- expect_equal( describe_prior(correlationBF(mtcars$wt, mtcars$mpg, rscale = 0.5)), structure(list( Parameter = "rho", 
Prior_Distribution = "beta", Prior_Location = 2, Prior_Scale = 2 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(ttestBF(mtcars$wt, mu = 3)), structure(list( Parameter = "Difference", Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.707106781186548 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "poisson" )), structure(list( Parameter = "Ratio", Prior_Distribution = "poisson", Prior_Location = 0, Prior_Scale = 1 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "indepMulti", fixedMargin = "cols", priorConcentration = 1.6 )), structure(list( Parameter = "Ratio", Prior_Distribution = "independent multinomial", Prior_Location = 0, Prior_Scale = 1.6 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(anovaBF(extra ~ group, data = sleep, progress = FALSE)), structure(list(Parameter = c( "group-1", "group-2", "mu", "sig2", "g_group" ), Prior_Distribution = c( "cauchy", "cauchy", NA, NA, NA ), Prior_Location = c(0, 0, NA, NA, NA), Prior_Scale = c( 0.5, 0.5, NA, NA, NA )), row.names = c(NA, -5L), class = "data.frame") ) # brms ---------------------------------------- mod_brms <- insight::download_model("brms_1") expect_equal( describe_prior(mod_brms), structure( list( Parameter = c("b_Intercept", "b_wt", "b_cyl", "sigma"), Prior_Distribution = c("student_t", "uniform", "uniform", "student_t"), Prior_Location = c(19.2, NA, NA, 0), Prior_Scale = c(5.4, NA, NA, 5.4), Prior_df = c(3, NA, NA, 3) ), row.names = c(NA, -4L), class = "data.frame", priors = structure( list( prior = c("(flat)", "(flat)", "(flat)", "student_t(3, 19.2, 5.4)", "student_t(3, 0, 5.4)"), class = c("b", "b", "b", "Intercept", "sigma"), coef = c("", "cyl", "wt", "", ""), group = c("", "", "", "", ""), resp = c("", "", "", "", ""), dpar = c("", "", "", "", ""), nlpar = c("", "", "", "", ""), bound = c("", "", "", "", ""), source = c("(unknown)", "(vectorized)", "(vectorized)", "(unknown)", "(unknown)"), Parameter = c("b_", "b_cyl", "b_wt", "b_Intercept", "sigma") ), special = list(mu = list()), row.names = c(NA, -5L), sample_prior = "no", class = "data.frame" ) ), ignore_attr = TRUE, tolerance = 1e-2 ) # stanreg ---------------------------------------- mod_stanreg1 <- insight::download_model("stanreg_gamm4_1") mod_stanreg2 <- insight::download_model("stanreg_merMod_1") expect_equal( describe_prior(mod_stanreg1), structure(list( Parameter = "(Intercept)", Prior_Distribution = "normal", Prior_Location = 3.05733333333333, Prior_Scale = 1.08966571234175 ), row.names = c( NA, -1L ), class = "data.frame") ) expect_equal( describe_prior(mod_stanreg2), structure(list( Parameter = c("(Intercept)", "cyl"), Prior_Distribution = c( "normal", "normal" ), Prior_Location = c(0, 0), Prior_Scale = c(2.5, 1.39983744766986) ), row.names = c(NA, -2L), class = "data.frame" ) ) }) } bayestestR/tests/testthat.R0000644000175000017500000000040313621654743015666 0ustar nileshnileshlibrary(testthat) library(bayestestR) if (length(strsplit(packageDescription("bayestestR")$Version, "\\.")[[1]]) > 3) { Sys.setenv("RunAllbayestestRTests" = "yes") } else { Sys.setenv("RunAllbayestestRTests" = "no") } test_check("bayestestR") bayestestR/tests/spelling.R0000644000175000017500000000023314054317544015640 0ustar nileshnileshif (requireNamespace("spelling", quietly = TRUE)) { 
spelling::spell_check_test( vignettes = TRUE, error = FALSE, skip_on_cran = TRUE ) } bayestestR/R/0000755000175000017500000000000014135670622012740 5ustar nileshnileshbayestestR/R/reexports.R0000644000175000017500000000011214064521534015106 0ustar nileshnilesh#' @export #' @importFrom datawizard reshape_ci datawizard::reshape_ci bayestestR/R/bayesfactor_restricted.R0000644000175000017500000001605014101116425017604 0ustar nileshnilesh#' Bayes Factors (BF) for Order Restricted Models #' #' This method computes Bayes factors for comparing a model with an order restrictions on its parameters #' with the fully unrestricted model. *Note that this method should only be used for confirmatory analyses*. #' \cr \cr #' The `bf_*` function is an alias of the main function. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).} #' #' @param posterior A `stanreg` / `brmsfit` object, `emmGrid` or a data frame - representing a posterior distribution(s) from (see Details). #' @param hypothesis A character vector specifying the restrictions as logical conditions (see examples below). #' @param prior An object representing a prior distribution (see Details). #' @inheritParams hdi #' #' @details This method is used to compute Bayes factors for order-restricted models vs un-restricted #' models by setting an order restriction on the prior and posterior distributions #' (\cite{Morey & Wagenmakers, 2013}). #' \cr\cr #' (Though it is possible to use `bayesfactor_restricted()` to test interval restrictions, #' it is more suitable for testing order restrictions; see examples). #' #' @inheritSection bayesfactor_parameters Setting the correct `prior` #' #' @inheritSection bayesfactor_parameters Interpreting Bayes Factors #' #' @return A data frame containing the (log) Bayes factor representing evidence *against* the un-restricted model. #' #' @examples #' library(bayestestR) #' prior <- data.frame( #' X = rnorm(100), #' X1 = rnorm(100), #' X3 = rnorm(100) #' ) #' #' posterior <- data.frame( #' X = rnorm(100, .4), #' X1 = rnorm(100, -.2), #' X3 = rnorm(100) #' ) #' #' hyps <- c( #' "X > X1 & X1 > X3", #' "X > X1" #' ) #' #' bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) #' \dontrun{ #' # rstanarm models #' # --------------- #' if (require("rstanarm") && require("emmeans")) { #' fit_stan <- stan_glm(mpg ~ wt + cyl + am, #' data = mtcars, refresh = 0 #' ) #' hyps <- c( #' "am > 0 & cyl < 0", #' "cyl < 0", #' "wt - cyl > 0" #' ) #' bayesfactor_restricted(fit_stan, hypothesis = hyps) #' #' # emmGrid objects #' # --------------- #' # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html #' disgust_data <- read.table(url("http://www.learnbayes.org/disgust_example.txt"), header = TRUE) #' #' contrasts(disgust_data$condition) <- contr.orthonorm # see vignette #' fit_model <- stan_glm(score ~ condition, data = disgust_data, family = gaussian()) #' #' em_condition <- emmeans(fit_model, ~condition) #' hyps <- c("lemon < control & control < sulfur") #' #' bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) #' # > # Bayes Factor (Order-Restriction) #' # > #' # > Hypothesis P(Prior) P(Posterior) BF #' # > lemon < control & control < sulfur 0.17 0.75 4.49 #' # > --- #' # > Bayes factors for the restricted model vs. the un-restricted model. 
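#'
#' # Interval restrictions (see Details) use the same logical syntax -
#' # e.g., for the data frames defined above, a hypothetical interval
#' # hypothesis on the 'X' column could be tested with:
#' # bayesfactor_restricted(posterior, hypothesis = "X > -.5 & X < .5", prior = prior)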
#' } #' } #' @references #' \itemize{ #' \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. #' \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. #' \item Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. Retrieved from https://richarddmorey.org/category/order-restrictions/. #' } #' #' @export bayesfactor_restricted <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { UseMethod("bayesfactor_restricted") } #' @rdname bayesfactor_restricted #' @export bf_restricted <- bayesfactor_restricted #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.stanreg <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ...) { effects <- match.arg(effects) component <- match.arg(component) samps <- .clean_priors_and_posteriors(posterior, prior, effects, component, verbose = verbose ) # Get savage-dickey BFs bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.brmsfit <- bayesfactor_restricted.stanreg #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.blavaan <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get savage-dickey BFs bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.emmGrid <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @export bayesfactor_restricted.emm_list <- bayesfactor_restricted.emmGrid #' @export bayesfactor_restricted.data.frame <- function(posterior, hypothesis, prior = NULL, ...) { p_hypothesis <- parse(text = hypothesis) if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please specify priors (with column names matching 'posterior')", " to get meaningful results." ) } .get_prob <- function(x, data) { x_logical <- try(eval(x, envir = data), silent = TRUE) if (inherits(x_logical, "try-error")) { cnames <- colnames(data) is_name <- make.names(cnames) == cnames cnames[!is_name] <- paste0("`", cnames[!is_name], "`") cnames <- paste0(cnames, collapse = ", ") stop(x_logical, "Available parameters are: ", cnames) } else if (!all(is.logical(x_logical))) { stop("Hypotheses must be logical") } mean(x_logical) } posterior_p <- sapply(p_hypothesis, .get_prob, data = posterior) prior_p <- sapply(p_hypothesis, .get_prob, data = prior) BF <- posterior_p / prior_p res <- data.frame( Hypothesis = hypothesis, p_prior = prior_p, p_posterior = posterior_p, log_BF = log(BF) ) class(res) <- unique(c( "bayesfactor_restricted", class(res) )) res } bayestestR/R/equivalence_test.R0000644000175000017500000003023614101116425016424 0ustar nileshnilesh#' Test for Practical Equivalence #' #' Perform a **Test for Practical Equivalence** for Bayesian and frequentist models.
#' #' Documentation is accessible for: #' \itemize{ #' \item [Bayesian models](https://easystats.github.io/bayestestR/reference/equivalence_test.html) #' \item [Frequentist models](https://easystats.github.io/parameters/reference/equivalence_test.lm.html) #' } #' #' For Bayesian models, the **Test for Practical Equivalence** is based on the *"HDI+ROPE decision rule"* (\cite{Kruschke, 2014, 2018}) to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (i.e., a ROPE). In other words, it checks the percentage of the `89%` [HDI][hdi] that is the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. #' #' #' @inheritParams rope #' #' @details Using the [ROPE][rope] and the [HDI][hdi], \cite{Kruschke (2018)} #' suggests using the percentage of the `95%` (or `89%`, considered more stable) #' HDI that falls within the ROPE as a decision rule. If the HDI #' is completely outside the ROPE, the "null hypothesis" for this parameter is #' "rejected". If the ROPE completely covers the HDI, i.e., all most credible #' values of a parameter are inside the region of practical equivalence, the #' null hypothesis is accepted. Else, it’s undecided whether to accept or #' reject the null hypothesis. If the full ROPE is used (i.e., `100%` of the #' HDI), then the null hypothesis is rejected or accepted if the percentage #' of the posterior within the ROPE is smaller than to `2.5%` or greater than #' `97.5%`. Desirable results are low proportions inside the ROPE (the closer #' to zero the better). #' \cr \cr #' Some attention is required for finding suitable values for the ROPE limits #' (argument `range`). See 'Details' in [`rope_range()`][rope_range] #' for further information. #' \cr \cr #' **Multicollinearity: Non-independent covariates** #' \cr \cr #' When parameters show strong correlations, i.e. when covariates are not #' independent, the joint parameter distributions may shift towards or #' away from the ROPE. In such cases, the test for practical equivalence may #' have inappropriate results. Collinearity invalidates ROPE and hypothesis #' testing based on univariate marginals, as the probabilities are conditional #' on independence. Most problematic are the results of the "undecided" #' parameters, which may either move further towards "rejection" or away #' from it (\cite{Kruschke 2014, 340f}). #' \cr \cr #' `equivalence_test()` performs a simple check for pairwise correlations #' between parameters, but as there can be collinearity between more than two variables, #' a first step to check the assumptions of this hypothesis testing is to look #' at different pair plots. An even more sophisticated check is the projection #' predictive variable selection (\cite{Piironen and Vehtari 2017}). #' #' #' @references \itemize{ #' \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304} #' \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press #' \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. 
\doi{10.1007/s11222-016-9649-y} #' } #' #' @return A data frame with the following columns: #' \itemize{ #' \item `Parameter` The model parameter(s), if `x` is a model-object. If `x` is a vector, this column is missing. #' \item `CI` The probability of the HDI. #' \item `ROPE_low`, `ROPE_high` The limits of the ROPE. These values are identical for all parameters. #' \item `ROPE_Percentage` The proportion of the HDI that lies inside the ROPE. #' \item `ROPE_Equivalence` The "test result", as character. Either "rejected", "accepted" or "undecided". #' \item `HDI_low`, `HDI_high` The lower and upper HDI limits for the parameters. #' } #' #' @note There is a `print()`-method with a `digits`-argument to control #' the amount of digits in the output, and there is a #' [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' to visualize the results from the equivalence-test (for models only). #' #' @examples #' library(bayestestR) #' #' equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) #' #' # print more digits #' test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) #' print(test, digits = 4) #' \dontrun{ #' library(rstanarm) #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' equivalence_test(model) #' equivalence_test(model, ci = c(.50, 1)) #' #' # plot result #' test <- equivalence_test(model) #' plot(test) #' #' library(emmeans) #' equivalence_test(emtrends(model, ~1, "wt")) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' equivalence_test(model) #' equivalence_test(model, ci = c(.50, .99)) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' equivalence_test(bf) #' equivalence_test(bf, ci = c(.50, .99)) #' } #' @export equivalence_test <- function(x, ...) { UseMethod("equivalence_test") } #' @rdname equivalence_test #' @export equivalence_test.default <- function(x, ...) { NULL } #' @rdname equivalence_test #' @export equivalence_test.numeric <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { rope_data <- rope(x, range = range, ci = ci) out <- as.data.frame(rope_data) if (all(ci < 1)) { out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage == 0, "Rejected", ifelse(out$ROPE_Percentage == 1, "Accepted", "Undecided") ) } else { # Related to guidelines for full rope (https://easystats.github.io/bayestestR/articles/4_Guidelines.html) out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage < 0.025, "Rejected", ifelse(out$ROPE_Percentage > 0.975, "Accepted", "Undecided") ) } out$HDI_low <- attr(rope_data, "HDI_area", exact = TRUE)$CI_low out$HDI_high <- attr(rope_data, "HDI_area", exact = TRUE)$CI_high # remove attribute attr(out, "HDI_area") <- NULL attr(out, "data") <- x class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) out } #' @rdname equivalence_test #' @export equivalence_test.data.frame <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...)
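# Data frame method: the test is applied column-wise and the results are
# stacked, with one row per parameter and requested 'ci' level.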
{ l <- .compact_list(lapply( x, equivalence_test, range = range, ci = ci, verbose = verbose )) dat <- do.call(rbind, l) out <- data.frame( Parameter = rep(names(l), each = nrow(dat) / length(l)), dat, stringsAsFactors = FALSE ) row.names(out) <- NULL attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- unique(c("equivalence_test", "see_equivalence_test_df", class(out))) out } #' @rdname equivalence_test #' @export equivalence_test.emmGrid <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- equivalence_test(xdf, range = range, ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.emm_list <- equivalence_test.emmGrid #' @rdname equivalence_test #' @export equivalence_test.BFBayesFactor <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { out <- equivalence_test(insight::get_parameters(x), range = range, ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @keywords internal .equivalence_test_models <- function(x, range = "default", ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE) { if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } if (verbose && !inherits(x, "blavaan")) .check_multicollinearity(x) params <- insight::get_parameters(x, component = component, effects = effects, parameters = parameters) l <- sapply( params, equivalence_test, range = range, ci = ci, verbose = verbose, simplify = FALSE ) dat <- do.call(rbind, l) out <- data.frame( Parameter = rep(names(l), each = nrow(dat) / length(l)), dat, stringsAsFactors = FALSE ) class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) out } #' @rdname equivalence_test #' @export equivalence_test.stanreg <- function(x, range = "default", ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .equivalence_test_models(x, range, ci, effects, component, parameters, verbose) out <- .prepare_output( out, insight::clean_parameters(x), inherits(x, "stanmvreg") ) class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.stanfit <- equivalence_test.stanreg #' @export equivalence_test.blavaan <- equivalence_test.stanreg #' @rdname equivalence_test #' @export equivalence_test.brmsfit <- function(x, range = "default", ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .equivalence_test_models(x, range, ci, effects, component, parameters, verbose) out <- .prepare_output( out, insight::clean_parameters(x), inherits(x, "stanmvreg") ) class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.sim.merMod <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) 
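# For simulated draws, only fixed effects of the conditional component are
# tested, and the multicollinearity check is skipped (verbose = FALSE).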
{ out <- .equivalence_test_models(x, range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.sim <- equivalence_test.sim.merMod #' @export equivalence_test.mcmc <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models(as.data.frame(x), range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.bcplm <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models(insight::get_parameters(x), range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export equivalence_test.blrm <- equivalence_test.bcplm #' @export equivalence_test.mcmc.list <- equivalence_test.bcplm #' @export equivalence_test.bayesQR <- equivalence_test.bcplm #' @export equivalence_test.bamlss <- function(x, range = "default", ci = 0.95, component = c("all", "conditional", "location"), parameters = NULL, verbose = TRUE, ...) { component <- match.arg(component) out <- .equivalence_test_models(insight::get_parameters(x, component = component), range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } bayestestR/R/mcse.R0000644000175000017500000000550514101116425014004 0ustar nileshnilesh#' Monte-Carlo Standard Error (MCSE) #' #' This function returns the Monte Carlo Standard Error (MCSE). #' #' @inheritParams effective_sample #' #' #' @details **Monte Carlo Standard Error (MCSE)** is another measure of #' accuracy of the chains. It is defined as the standard deviation of the chains #' divided by the square root of their effective sample size (the formula for `mcse()` is #' from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative #' suggestion of how big the estimation noise is}. #' #' @references Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' #' @examples #' \dontrun{ #' library(bayestestR) #' library(rstanarm) #' #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' mcse(model) #' } #' @export mcse <- function(model, ...) { UseMethod("mcse") } #' @export mcse.brmsfit <- function(model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) params <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) ess <- effective_sample( model, effects = effects, component = component, parameters = parameters ) .mcse(params, stats::setNames(ess$ESS, ess$Parameter)) } #' @rdname mcse #' @export mcse.stanreg <- function(model, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...)
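# MCSE = posterior SD / sqrt(ESS): e.g., a parameter with SD = 0.5 and an
# effective sample size of 400 has an MCSE of 0.5 / sqrt(400) = 0.025.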
{ # check arguments effects <- match.arg(effects) component <- match.arg(component) params <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) ess <- effective_sample( model, effects = effects, component = component, parameters = parameters ) .mcse(params, stats::setNames(ess$ESS, ess$Parameter)) } #' @export mcse.stanfit <- mcse.stanreg #' @export mcse.blavaan <- mcse.stanreg #' @keywords internal .mcse <- function(params, ess) { # get standard deviations from posterior samples stddev <- sapply(params, stats::sd) # check proper length, and for unequal length, shorten all # objects to common parameters if (length(stddev) != length(ess)) { common <- stats::na.omit(match(names(stddev), names(ess))) stddev <- stddev[common] ess <- ess[common] params <- params[common] } # compute mcse data.frame( Parameter = colnames(params), MCSE = stddev / sqrt(ess), stringsAsFactors = FALSE, row.names = NULL ) } bayestestR/R/si.R0000644000175000017500000002176314133414607013504 0ustar nileshnilesh#' Compute Support Intervals #' #' A support interval contains only the values of the parameter that predict the observed data better #' than average, by some degree *k*; these are values of the parameter that are associated with an #' updating factor greater than or equal to *k*. From the perspective of the Savage-Dickey Bayes factor, testing #' against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller #' than *1/k*. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, #' see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).} #' #' @param BF The amount of support required to be included in the support interval. #' @inheritParams bayesfactor_parameters #' @inheritParams hdi #' @inherit hdi seealso #' @family ci #' #' @details This method is used to compute support intervals based on prior and posterior distributions. #' For the computation of support intervals, the model priors must be proper priors (at the very least #' they should be *not flat*, and it is preferable that they be *informative* - note #' that by default, `brms::brm()` uses flat priors for fixed-effects; see example below). #' #' \subsection{Choosing a value of `BF`}{ #' The choice of `BF` (the level of support) depends on what we want our interval to represent: #' \itemize{ #' \item A `BF` = 1 contains values whose credibility is not decreased by observing the data. #' \item A `BF` > 1 contains values that received more impressive support from the data. #' \item A `BF` < 1 contains values whose credibility has *not* been impressively decreased by observing the data. #' Testing against values outside this interval will produce a Bayes factor larger than 1/`BF` in support of #' the alternative. E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null will be larger than 3. #' } #' } #' #' @inheritSection bayesfactor_parameters Setting the correct `prior` #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @return #' A data frame containing the lower and upper bounds of the SI. #' \cr #' Note that if the level of requested support is higher than observed in the data, the #' interval will be `[NA,NA]`.
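#' For instance, with `BF = 3` the support interval contains only those
#' parameter values that predict the observed data at least 3 times better
#' than average.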
#' #' @examples #' library(bayestestR) #' #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = .5, sd = .3) #' #' si(posterior, prior) #' \dontrun{ #' # rstanarm models #' # --------------- #' library(rstanarm) #' contrasts(sleep$group) <- contr.orthonorm # see vignette #' stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) #' si(stan_model) #' si(stan_model, BF = 3) #' #' # emmGrid objects #' # --------------- #' library(emmeans) #' group_diff <- pairs(emmeans(stan_model, ~group)) #' si(group_diff, prior = stan_model) #' #' # brms models #' # ----------- #' library(brms) #' contrasts(sleep$group) <- contr.orthonorm # see vignette #' my_custom_priors <- #' set_prior("student_t(3, 0, 1)", class = "b") + #' set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") #' #' brms_model <- brm(extra ~ group + (1 | ID), #' data = sleep, #' prior = my_custom_priors #' ) #' si(brms_model) #' } #' @references #' Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). The Support Interval. \doi{10.31234/osf.io/zwnxb} #' #' @export si <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { UseMethod("si") } #' @rdname si #' @export si.numeric <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { if (is.null(prior)) { prior <- posterior if (verbose) { warning(insight::format_message( "Prior not specified!", "Support intervals ('si') can only be computed for Bayesian models with proper priors.", "Please specify priors (with column order matching 'posterior')." ), call. = FALSE) } } prior <- data.frame(X = prior) posterior <- data.frame(X = posterior) # Get SIs out <- si.data.frame( posterior = posterior, prior = prior, BF = BF, verbose = verbose, ... ) out$Parameter <- NULL out } #' @rdname si #' @export si.stanreg <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "zi", "zero_inflated", "all", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) { cleaned_parameters <- insight::clean_parameters(posterior) effects <- match.arg(effects) component <- match.arg(component) samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose, effects = effects, component = component, parameters = parameters ) # Get SIs temp <- si.data.frame( posterior = samps$posterior, prior = samps$prior, BF = BF, verbose = verbose, ... ) out <- .prepare_output(temp, cleaned_parameters, inherits(posterior, "stanmvreg")) attr(out, "ci_method") <- "SI" attr(out, "object_name") <- .safe_deparse(substitute(posterior)) class(out) <- class(temp) attr(out, "plot_data") <- attr(temp, "plot_data") out } #' @rdname si #' @export si.brmsfit <- si.stanreg #' @rdname si #' @export si.blavaan <- si.stanreg #' @rdname si #' @export si.emmGrid <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get SIs out <- si.data.frame( posterior = samps$posterior, prior = samps$prior, BF = BF, verbose = verbose, ... ) attr(out, "ci_method") <- "SI" attr(out, "object_name") <- .safe_deparse(substitute(posterior)) out } #' @export si.emm_list <- si.emmGrid #' @rdname si #' @export si.data.frame <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { if (length(BF) > 1) { SIs <- lapply(BF, function(i) { si(posterior, prior = prior, BF = i, verbose = verbose, ...)
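# one support interval per requested level of BF; rows are stacked below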
}) out <- do.call(rbind, SIs) attr(out, "plot_data") <- attr(SIs[[1]], "plot_data") class(out) <- unique(c("bayestestR_si", "bayestestR_ci", class(out))) return(out) } if (is.null(prior)) { prior <- posterior warning(insight::format_message( "Prior not specified!", "Support intervals ('si') can only be computed for Bayesian models with proper priors.", "Please specify priors (with column order matching 'posterior')." ), call. = FALSE) } sis <- matrix(NA, nrow = ncol(posterior), ncol = 2) for (par in seq_along(posterior)) { sis[par, ] <- .si(posterior[[par]], prior[[par]], BF = BF, ... ) } out <- data.frame( Parameter = colnames(posterior), CI = BF, CI_low = sis[, 1], CI_high = sis[, 2], stringsAsFactors = FALSE ) attr(out, "ci_method") <- "SI" attr(out, "plot_data") <- .make_BF_plot_data(posterior, prior, 0, 0, ...)$plot_data class(out) <- unique(c("bayestestR_si", "see_si", "bayestestR_ci", "see_ci", class(out))) out } #' @export si.stanfit <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), ...) { si(insight::get_parameters(posterior, effects = effects)) } #' @export si.get_predicted <- function(posterior, ...) { out <- si(as.data.frame(t(posterior)), ...) attr(out, "object_name") <- .safe_deparse(substitute(posterior)) out } # Helper ------------------------------------------------------------------ #' @keywords internal .si <- function(posterior, prior, BF = 1, extend_scale = 0.05, precision = 2^8, ...) { insight::check_if_installed("logspline") if (isTRUE(all.equal(prior, posterior))) { return(c(NA, NA)) } x <- c(prior, posterior) x_range <- range(x) x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1) x_range <- c( max(c(x_range[1], x_rangex[1])), min(c(x_range[2], x_rangex[2])) ) extension_scale <- diff(x_range) * extend_scale x_range <- x_range + c(-1, 1) * extension_scale x_axis <- seq(x_range[1], x_range[2], length.out = precision) f_prior <- .logspline(prior, ...) f_posterior <- .logspline(posterior, ...) d_prior <- logspline::dlogspline(x_axis, f_prior) d_posterior <- logspline::dlogspline(x_axis, f_posterior) relative_d <- d_posterior / d_prior crit <- relative_d >= BF cp <- rle(c(stats::na.omit(crit))) if (length(cp$lengths) > 3) { warning("More than 1 SI detected. Plot the result to investigate.", call. = FALSE) } x_supported <- stats::na.omit(x_axis[crit]) if (length(x_supported) < 2) { return(c(NA, NA)) } else { range(x_supported) } } bayestestR/R/bayesfactor_parameters.R0000644000175000017500000004534714101116425017612 0ustar nileshnilesh#' Bayes Factors (BF) for a Single Parameter #' #' This method computes Bayes factors against the null (either a point or an #' interval), based on prior and posterior samples of a single parameter. This #' Bayes factor indicates the degree by which the mass of the posterior #' distribution has shifted further away from or closer to the null value(s) #' (relative to the prior distribution), thus indicating if the null value has #' become less or more likely given the observed data. 
#' \cr \cr #' When the null is an interval, the Bayes factor is computed by comparing the #' prior and posterior odds of the parameter falling within or outside the null #' interval (Morey & Rouder, 2011; Liao et al., 2020); when the null is a point, #' a Savage-Dickey density ratio is computed, which is also an approximation of #' a Bayes factor comparing the marginal likelihoods of the model against a #' model in which the tested parameter has been restricted to the point null #' (Wagenmakers et al., 2010; Heck, 2019). #' \cr \cr #' Note that the `logspline` package is used for estimating densities and #' probabilities, and must be installed for the function to work. #' \cr \cr #' `bayesfactor_pointnull()` and `bayesfactor_rope()` are wrappers #' around `bayesfactor_parameters` with different defaults for the null to #' be tested against (a point and a range, respectively). Aliases of the main #' functions are prefixed with `bf_*`, like `bf_parameters()` or #' `bf_pointnull()`. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors #' with more than 2 levels, see #' [the #' Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).} #' #' @param posterior A numerical vector, `stanreg` / `brmsfit` object, #' `emmGrid` or a data frame - representing posterior distribution(s) #' (see 'Details'). #' @param prior An object representing a prior distribution (see 'Details'). #' @param direction Test type (see 'Details'). One of `0`, #' `"two-sided"` (default, two-tailed), `-1`, `"left"` (left-tailed) #' or `1`, `"right"` (right-tailed). #' @param null Value of the null, either a scalar (for point-null) or a range #' (for an interval-null). #' @param ... Arguments passed to and from other methods. (Can be used to pass #' arguments to internal [logspline::logspline()].) #' @inheritParams hdi #' #' @return A data frame containing the (log) Bayes factor representing evidence #' *against* the null. #' #' @note There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' implemented in the #' \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' This method is used to compute Bayes factors based on prior and posterior #' distributions. #' #' \subsection{One-sided & Dividing Tests (setting an order restriction)}{ #' One-sided tests (controlled by `direction`) are conducted by restricting #' the prior and posterior of the non-null values (the "alternative") to one #' side of the null only (\cite{Morey & Wagenmakers, 2014}). For example, if we #' have a prior hypothesis that the parameter should be positive, the #' alternative will be restricted to the region to the right of the null (point #' or interval). For example, for a Bayes factor comparing the "null" of `0-0.1` #' to the alternative `>0.1`, we would set #' `bayesfactor_parameters(null = c(0, 0.1), direction = ">")`. #' \cr\cr #' It is also possible to compute a Bayes factor for **dividing** #' hypotheses - that is, for a null and alternative that are complementary, #' opposing one-sided hypotheses (\cite{Morey & Wagenmakers, 2014}). For #' example, for a Bayes factor comparing the "null" of `<0` to the alternative #' `>0`, we would set `bayesfactor_parameters(null = c(-Inf, 0))`.
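#' \cr\cr
#' As a minimal sketch with simulated draws (assuming the `logspline` package
#' is available), such a dividing Bayes factor could be computed with
#' `bayesfactor_parameters(distribution_normal(1000, .5, .3), distribution_normal(1000, 0, 1), null = c(-Inf, 0))`.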
#' } #' #' @section Setting the correct `prior`: #' For the computation of Bayes factors, the model priors must be proper priors #' (at the very least they should be *not flat*, and it is preferable that #' they be *informative*); as the priors for the alternative get wider, the #' likelihood of the null value(s) increases, to the extreme that for completely #' flat priors the null is infinitely more favorable than the alternative (this #' is called *the Jeffreys-Lindley-Bartlett paradox*). Thus, you should #' only ever try (or want) to compute a Bayes factor when you have an informed #' prior. #' \cr\cr #' (Note that by default, `brms::brm()` uses flat priors for fixed-effects; #' see example below.) #' \cr\cr #' It is important to provide the correct `prior` for meaningful results. #' \itemize{ #' \item When `posterior` is a numerical vector, `prior` should also be a numerical vector. #' \item When `posterior` is a `data.frame`, `prior` should also be a `data.frame`, with matching column order. #' \item When `posterior` is a `stanreg` or `brmsfit` model: \itemize{ #' \item `prior` can be set to `NULL`, in which case prior samples are drawn internally. #' \item `prior` can also be a model equivalent to `posterior` but with samples from the priors *only*. See [unupdate()]. #' \item **Note:** When `posterior` is a `brmsfit_multiple` model, `prior` **must** be provided. #' } #' \item When `posterior` is an `emmGrid` object: \itemize{ #' \item `prior` should be the `stanreg` or `brmsfit` model used to create the `emmGrid` objects. #' \item `prior` can also be an `emmGrid` object equivalent to `posterior` but created with a model of prior samples *only*. #' \item **Note:** When the `emmGrid` has undergone any transformations (`"log"`, `"response"`, etc.), or `regrid`ing, then `prior` must be an `emmGrid` object, as stated above. #' } #' } #' #' @section Interpreting Bayes Factors: #' A Bayes factor greater than 1 can be interpreted as evidence against the #' null, where one convention is that a Bayes factor greater than 3 can be #' considered as "substantial" evidence against the null (and vice versa, a #' Bayes factor smaller than 1/3 indicates substantial evidence in favor of the #' null-model) (\cite{Wetzels et al. 2011}). #' #' @examples #' library(bayestestR) #' if (require("logspline")) { #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = .5, sd = .3) #' bayesfactor_parameters(posterior, prior) #' } #' \dontrun{ #' # rstanarm models #' # --------------- #' if (require("rstanarm") && require("emmeans") && require("logspline")) { #' contrasts(sleep$group) <- contr.orthonorm # see vignette #' stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) #' bayesfactor_parameters(stan_model) #' bayesfactor_parameters(stan_model, null = rope_range(stan_model)) #' #' # emmGrid objects #' # --------------- #' group_diff <- pairs(emmeans(stan_model, ~group)) #' bayesfactor_parameters(group_diff, prior = stan_model) #' } #' #' # brms models #' # ----------- #' if (require("brms")) { #' contrasts(sleep$group) <- contr.orthonorm # see vignette #' my_custom_priors <- #' set_prior("student_t(3, 0, 1)", class = "b") + #' set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") #' #' brms_model <- brm(extra ~ group + (1 | ID), #' data = sleep, #' prior = my_custom_priors #' ) #' bayesfactor_parameters(brms_model) #' } #' } #' @references #' \itemize{ #' \item Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010).
#' Bayesian hypothesis testing for psychologists: A tutorial on the #' Savage-Dickey method. Cognitive psychology, 60(3), 158-189. #' \item Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The #' case of computing Bayes factors for regression parameters. British Journal of #' Mathematical and Statistical Psychology, 72(2), 316-333. #' \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between #' Bayesian order-restricted and point-null hypothesis tests. Statistics & #' Probability Letters, 92, 121-124. #' \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for #' testing interval null hypotheses. Psychological methods, 16(4), 406. #' \item Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting #' the Bayes factor and a modified ROPE procedure for testing interval null #' hypotheses. The American Statistician, 1-19. #' \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and #' Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: #' An Empirical Comparison Using 855 t Tests. Perspectives on Psychological #' Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' } #' #' @author Mattan S. Ben-Shachar #' #' @export bayesfactor_parameters <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { UseMethod("bayesfactor_parameters") } #' @rdname bayesfactor_parameters #' @export bayesfactor_pointnull <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { if (length(null) > 1) { message("'null' is a range - computing a ROPE based Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) } #' @rdname bayesfactor_parameters #' @export bayesfactor_rope <- function(posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ...) { if (length(null) < 2) { message("'null' is a point - computing a Savage-Dickey (point null) Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) } #' @rdname bayesfactor_parameters #' @export bf_parameters <- bayesfactor_parameters #' @rdname bayesfactor_parameters #' @export bf_pointnull <- bayesfactor_pointnull #' @rdname bayesfactor_parameters #' @export bf_rope <- bayesfactor_rope #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.numeric <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { # nm <- .safe_deparse(substitute(posterior) if (is.null(prior)) { prior <- posterior if (verbose) { warning( "Prior not specified! ", "Please specify a prior (in the form 'prior = distribution_normal(1000, 0, 1)')", " to get meaningful results." ) } } prior <- data.frame(X = prior) posterior <- data.frame(X = posterior) # colnames(posterior) <- colnames(prior) <- nm # Get BFs sdbf <- bayesfactor_parameters.data.frame( posterior = posterior, prior = prior, direction = direction, null = null, ... ) sdbf$Parameter <- NULL sdbf } #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.stanreg <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "location", "smooth_terms", "sigma", "zi", "zero_inflated", "all"), parameters = NULL, ...) 
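# For Stan models: collect matching prior and posterior draws (sampling
# priors internally if none are supplied), then delegate to the data.frame
# method and re-attach the cleaned parameter names.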
{ cleaned_parameters <- insight::clean_parameters(posterior) effects <- match.arg(effects) component <- match.arg(component) samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose, effects = effects, component = component, parameters = parameters ) # Get BFs temp <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, ... ) bf_val <- .prepare_output(temp, cleaned_parameters, inherits(posterior, "stanmvreg")) class(bf_val) <- class(temp) attr(bf_val, "clean_parameters") <- cleaned_parameters attr(bf_val, "hypothesis") <- attr(temp, "hypothesis") # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- attr(temp, "direction") attr(bf_val, "plot_data") <- attr(temp, "plot_data") bf_val } #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.brmsfit <- bayesfactor_parameters.stanreg #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.blavaan <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { cleaned_parameters <- insight::clean_parameters(posterior) samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get BFs temp <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, ... ) bf_val <- .prepare_output(temp, cleaned_parameters) class(bf_val) <- class(temp) attr(bf_val, "clean_parameters") <- cleaned_parameters attr(bf_val, "hypothesis") <- attr(temp, "hypothesis") # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- attr(temp, "direction") attr(bf_val, "plot_data") <- attr(temp, "plot_data") bf_val } #' @export bayesfactor_parameters.emmGrid <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get BFs bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, ... ) } #' @export bayesfactor_parameters.emm_list <- bayesfactor_parameters.emmGrid #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.data.frame <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { # find direction direction <- .get_direction(direction) if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please specify priors (with column order matching 'posterior')", " to get meaningful results." ) } sdbf <- numeric(ncol(posterior)) for (par in seq_along(posterior)) { sdbf[par] <- .bayesfactor_parameters( posterior[[par]], prior[[par]], direction = direction, null = null, ... ) } bf_val <- data.frame( Parameter = colnames(posterior), log_BF = log(sdbf), stringsAsFactors = FALSE ) class(bf_val) <- unique(c( "bayesfactor_parameters", "see_bayesfactor_parameters", class(bf_val) )) attr(bf_val, "hypothesis") <- null # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- direction attr(bf_val, "plot_data") <- .make_BF_plot_data(posterior, prior, direction, null, ...) bf_val } #' @keywords internal .bayesfactor_parameters <- function(posterior, prior, direction = 0, null = 0, ...) 
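# Point null: Savage-Dickey ratio - prior density at the null divided by
# posterior density at the null (renormalized to one tail if a direction is
# given). Interval null: ratio of posterior to prior odds of falling
# outside vs. inside the interval.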
{ stopifnot(length(null) %in% c(1, 2)) if (isTRUE(all.equal(posterior, prior))) { return(1) } insight::check_if_installed("logspline") if (length(null) == 1) { relative_density <- function(samples) { f_samples <- .logspline(samples, ...) d_samples <- logspline::dlogspline(null, f_samples) if (direction < 0) { norm_samples <- logspline::plogspline(null, f_samples) } else if (direction > 0) { norm_samples <- 1 - logspline::plogspline(null, f_samples) } else { norm_samples <- 1 } d_samples / norm_samples } return(relative_density(prior) / relative_density(posterior)) } else if (length(null) == 2) { null <- sort(null) null[is.infinite(null)] <- 1.797693e+308 * sign(null[is.infinite(null)]) f_prior <- .logspline(prior, ...) f_posterior <- .logspline(posterior, ...) h0_prior <- diff(logspline::plogspline(null, f_prior)) h0_post <- diff(logspline::plogspline(null, f_posterior)) BF_null_full <- h0_post / h0_prior if (direction < 0) { h1_prior <- logspline::plogspline(min(null), f_prior) h1_post <- logspline::plogspline(min(null), f_posterior) } else if (direction > 0) { h1_prior <- 1 - logspline::plogspline(max(null), f_prior) h1_post <- 1 - logspline::plogspline(max(null), f_posterior) } else { h1_prior <- 1 - h0_prior h1_post <- 1 - h0_post } BF_alt_full <- h1_post / h1_prior return(BF_alt_full / BF_null_full) } } # Bad Methods ------------------------------------------------------------- #' @export bayesfactor_parameters.bayesfactor_models <- function(...) { stop( "Oh no, 'bayesfactor_parameters()' does not know how to deal with multiple models :(\n", "You might want to use 'bayesfactor_inclusion()' here to test specific terms across models." ) } #' @export bayesfactor_parameters.sim <- function(...) { stop( "Bayes factors are based on the shift from a prior to a posterior. ", "Since simulated draws are not based on any priors, computing Bayes factors does not make sense :(\n", "You might want to try `rope`, `ci`, `pd` or `pmap` for posterior-based inference." ) } #' @export bayesfactor_parameters.sim.merMod <- bayesfactor_parameters.sim bayestestR/R/simulate_priors.R0000644000175000017500000000725614101116425016303 0ustar nileshnilesh#' Returns Priors of a Model as Empirical Distributions #' #' Transforms priors information to actual distributions. #' #' @inheritParams effective_sample #' @param n Size of the simulated prior distributions. #' #' @seealso [unupdate()] for directly sampling from the prior #' distribution (useful for complex priors and designs). #' #' @examples #' \dontrun{ #' library(bayestestR) #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' simulate_prior(model) #' } #' } #' @export simulate_prior <- function(model, n = 1000, ...) { UseMethod("simulate_prior") } #' @export simulate_prior.stanreg <- function(model, n = 1000, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) priors <- insight::get_priors( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) .simulate_prior(priors, n = n, verbose = verbose) } #' @export simulate_prior.blavaan <- simulate_prior.stanreg #' @export simulate_prior.brmsfit <- function(model, n = 1000, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) 
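# Same flow as the stanreg method: look up the prior definitions via
# insight::get_priors(), then draw n samples from each distribution.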
{ # check arguments effects <- match.arg(effects) component <- match.arg(component) priors <- insight::get_priors( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) .simulate_prior(priors, n = n, verbose = verbose) } #' @export simulate_prior.bcplm <- function(model, n = 1000, verbose = TRUE, ...) { .simulate_prior(insight::get_priors(model, verbose = verbose), n = n, verbose = verbose) } #' @keywords internal .simulate_prior <- function(priors, n = 1000, verbose = TRUE) { simulated <- data.frame(.bamboozled = 1:n) sim_error_msg <- FALSE # iterate over parameters for (param in priors$Parameter) { prior <- priors[priors$Parameter == param, ] # edge cases if (nrow(prior) > 1) { prior <- prior[1, ] } # Get actual scale if ("Adjusted_Scale" %in% names(prior)) { scale <- prior$Adjusted_Scale # if autoscale = FALSE, scale contains NA values - replace # with non-adjusted then. if (anyNA(scale)) scale[is.na(scale)] <- prior$Scale[is.na(scale)] } else { scale <- prior$Scale } # Simulate prior - keep the distribution name around for the warning # below, since 'prior' is overwritten with the simulated values prior_dist <- prior$Distribution prior <- tryCatch( { if (prior$Distribution %in% c("t", "student_t", "Student's t")) { distribution(prior$Distribution, n, prior$df, prior$Location) } else { distribution(prior$Distribution, n, prior$Location, scale) } }, error = function(e) { # '<<-' so the flag is set in .simulate_prior()'s frame, not the handler's sim_error_msg <<- TRUE NA } ) simulated[param] <- prior } if (sim_error_msg && verbose) { warning(paste0("Can't simulate priors from a ", prior_dist, " distribution."), call. = FALSE) } simulated$.bamboozled <- NULL simulated } bayestestR/R/plot.R0000644000175000017500000000457014125235747014043 0ustar nileshnilesh#' @export plot.equivalence_test <- function(x, ...) { insight::check_if_installed("see", "to plot results from equivalence-test") NextMethod() } #' @export plot.p_direction <- function(x, ...) { insight::check_if_installed("see", "to plot results from p_direction()") NextMethod() } #' @export plot.point_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } #' @export plot.map_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } #' @export plot.rope <- function(x, ...) { insight::check_if_installed("see", "to plot ROPE") NextMethod() } #' @export plot.bayestestR_hdi <- function(x, ...) { insight::check_if_installed("see", "to plot HDI") NextMethod() } #' @export plot.bayestestR_eti <- function(x, ...) { insight::check_if_installed("see", "to plot credible intervals") NextMethod() } #' @export plot.bayestestR_si <- function(x, ...) { insight::check_if_installed("see", "to plot support intervals") NextMethod() } #' @export plot.bayesfactor_parameters <- function(x, ...) { insight::check_if_installed("see", "to plot Savage-Dickey Bayes factor") NextMethod() } #' @export plot.bayesfactor_models <- function(x, ...) { insight::check_if_installed("see", "to plot models' Bayes factors") NextMethod() } #' @export plot.estimate_density <- function(x, ...) { insight::check_if_installed("see", "to plot densities") NextMethod() } #' @export plot.estimate_density_df <- function(x, ...) { insight::check_if_installed("see", "to plot models' densities") NextMethod() } #' @export plot.p_significance <- function(x, ...) { insight::check_if_installed("see", "to plot practical significance") NextMethod() } #' @export plot.describe_posterior <- function(x, stack = FALSE, ...)
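# Unlike the plot methods above, this one cannot simply dispatch to 'see';
# it needs the original model object to re-estimate the posterior densities.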
{ insight::check_if_installed("see", "to plot posterior samples") insight::check_if_installed("ggplot2", "to plot posterior samples") model <- .retrieve_model(x) if (!is.null(model)) { plot(estimate_density(model), stack = stack, ...) + ggplot2::labs(title = "Posterior Samples", x = NULL, y = NULL) } else { warning(insight::format_message("Could not find model-object. Try ' plot(estimate_density(model))' instead."), call. = FALSE) } } bayestestR/R/p_map.R0000644000175000017500000002134214101116425014146 0ustar nileshnilesh#' Bayesian p-value based on the density at the Maximum A Posteriori (MAP) #' #' Compute a Bayesian equivalent of the *p*-value, related to the odds that a parameter (described by its posterior distribution) has against the null hypothesis (*h0*) using Mills' (2014, 2017) *Objective Bayesian Hypothesis Testing* framework. It corresponds to the density value at 0 divided by the density at the Maximum A Posteriori (MAP). #' #' @details Note that this method is sensitive to the density estimation `method` (see the section in the examples below). #' \subsection{Strengths and Limitations}{ #' **Strengths:** Straightforward computation. Objective property of the posterior distribution. #' \cr \cr #' **Limitations:** Limited information favoring the null hypothesis. Relates on density approximation. Indirect relationship between mathematical definition and interpretation. Only suitable for weak / very diffused priors. #' } #' #' @inheritParams hdi #' @inheritParams density_at #' #' @examples #' library(bayestestR) #' #' p_map(rnorm(1000, 0, 1)) #' p_map(rnorm(1000, 10, 1)) #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' p_map(model) #' #' library(emmeans) #' p_map(emtrends(model, ~1, "wt")) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_map(model) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' p_map(bf) #' #' # --------------------------------------- #' # Robustness to density estimation method #' set.seed(333) #' data <- data.frame() #' for (iteration in 1:250) { #' x <- rnorm(1000, 1, 1) #' result <- data.frame( #' "Kernel" = p_map(x, method = "kernel"), #' "KernSmooth" = p_map(x, method = "KernSmooth"), #' "logspline" = p_map(x, method = "logspline") #' ) #' data <- rbind(data, result) #' } #' data$KernSmooth <- data$Kernel - data$KernSmooth #' data$logspline <- data$Kernel - data$logspline #' #' summary(data$KernSmooth) #' summary(data$logspline) #' boxplot(data[c("KernSmooth", "logspline")]) #' } #' @seealso [Jeff Mill's talk](https://www.youtube.com/watch?v=Ip8Ci5KUVRc) #' #' @references \itemize{ #' \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' \item Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. #' } #' #' @export p_map <- function(x, precision = 2^10, method = "kernel", ...) { UseMethod("p_map") } #' @rdname p_map #' @export p_pointnull <- p_map #' @export p_map.numeric <- function(x, precision = 2^10, method = "kernel", ...) { # Density at MAP map <- attributes(map_estimate(x, precision = precision, method = method, ...))$MAP_density # Density at 0 d_0 <- density_at(x, 0, precision = precision, method = method, ...) 
if (is.na(d_0)) d_0 <- 0 # Odds p <- d_0 / map class(p) <- c("p_map", class(p)) p } #' @export p_map.data.frame <- function(x, precision = 2^10, method = "kernel", ...) { x <- .select_nums(x) if (ncol(x) == 1) { p_MAP <- p_map(x[, 1], precision = precision, method = method, ...) } else { p_MAP <- sapply(x, p_map, precision = precision, method = method, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "p_MAP" = p_MAP, row.names = NULL, stringsAsFactors = FALSE ) class(out) <- c("p_map", class(out)) out } #' @export p_map.emmGrid <- function(x, precision = 2^10, method = "kernel", ...) { xdf <- insight::get_parameters(x) out <- p_map(xdf, precision = precision, method = method, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_map.emm_list <- p_map.emmGrid #' @keywords internal .p_map_models <- function(x, precision, method, effects, component, parameters, ...) { p_map(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method, ...) } #' @export p_map.mcmc <- function(x, precision = 2^10, method = "kernel", parameters = NULL, ...) { out <- .p_map_models( x = x, precision = precision, method = method, effects = "fixed", component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_map.bcplm <- p_map.mcmc #' @export p_map.blrm <- p_map.mcmc #' @export p_map.mcmc.list <- p_map.mcmc #' @export p_map.BGGM <- p_map.mcmc #' @export p_map.bamlss <- function(x, precision = 2^10, method = "kernel", component = c("all", "conditional", "location"), parameters = NULL, ...) { component <- match.arg(component) out <- .p_map_models( x = x, precision = precision, method = method, effects = "all", component = component, parameters = parameters, ... ) out <- .add_clean_parameters_attribute(out, x) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_map.sim.merMod <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ...) { effects <- match.arg(effects) out <- .p_map_models( x = x, precision = precision, method = method, effects = effects, component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters) out } #' @export p_map.sim <- function(x, precision = 2^10, method = "kernel", parameters = NULL, ...) { out <- .p_map_models( x = x, precision = precision, method = method, effects = "fixed", component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @rdname p_map #' @export p_map.stanreg <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) 
{ effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( p_map(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_map", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_map.stanfit <- p_map.stanreg #' @export p_map.blavaan <- p_map.stanreg #' @rdname p_map #' @export p_map.brmsfit <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( p_map(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method, ...), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_map", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_map.BFBayesFactor <- function(x, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), precision = precision, method = method, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_map.MCMCglmm <- function(x, precision = 2^10, method = "kernel", ...) { nF <- x$Fixed$nfl out <- p_map(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), precision = precision, method = method, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_map.bayesQR <- function(x, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), precision = precision, method = method, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname as.numeric.p_direction #' @method as.numeric p_map #' @export as.numeric.p_map <- function(x, ...) { if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$p_MAP))) } else { return(as.vector(x)) } } #' @method as.double p_map #' @export as.double.p_map <- as.numeric.p_map bayestestR/R/check_prior.R0000644000175000017500000001545714101116425015354 0ustar nileshnilesh#' Check if Prior is Informative #' #' Performs a simple test to check whether the prior is informative relative to the #' posterior. This idea, and the accompanying heuristics, were discussed in #' [this blogpost](https://statmodeling.stat.columbia.edu/2019/08/10/). #' #' @param method Can be `"gelman"` or `"lakeland"`. For the #' `"gelman"` method, if the SD of the posterior is more than 0.1 times #' the SD of the prior, then the prior is considered as informative. For the #' `"lakeland"` method, the prior is considered as informative if the #' posterior falls within the `95%` HDI of the prior. #' @param simulate_priors Should prior distributions be simulated using #' [simulate_prior()] (default; faster) or sampled via #' [unupdate()] (slower, more accurate). #' @inheritParams effective_sample #' @inheritParams hdi #' #' @return A data frame with two columns: The parameter names and the quality #' of the prior (which might be `"informative"`, `"uninformative"`, #' `"misinformative"`, or `"not determinable"` if the prior distribution could not be #' determined).
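#'
#' For intuition, the `"gelman"` heuristic boils down to a simple SD-ratio
#' check. A minimal sketch on simulated draws (not the internal
#' implementation, which additionally matches parameter names between prior
#' and posterior):
#' ```
#' prior <- rnorm(10000, 0, 10) # wide prior
#' posterior <- rnorm(10000, 2, 0.5) # much narrower posterior
#' sd(posterior) > 0.1 * sd(prior) # FALSE, so the prior is "uninformative"
#' ```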
#' #' @examples #' \dontrun{ #' library(bayestestR) #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' #' # An extreme example where both methods diverge: #' model <- stan_glm(mpg ~ wt, #' data = mtcars[1:3, ], #' prior = normal(-3.3, 1, FALSE), #' prior_intercept = normal(0, 1000, FALSE), #' refresh = 0 #' ) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' plot(si(model)) # can provide visual confirmation to the Lakeland method #' } #' } #' @references https://statmodeling.stat.columbia.edu/2019/08/10/ #' @export check_prior <- function(model, method = "gelman", simulate_priors = TRUE, ...) { UseMethod("check_prior") } #' @export check_prior.brmsfit <- function(model, method = "gelman", simulate_priors = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) posteriors <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) if (isTRUE(simulate_priors)) { priors <- simulate_prior( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) } else { priors <- unupdate(model, verbose = FALSE) priors <- insight::get_parameters( priors, effects = effects, component = component, parameters = parameters ) } .check_prior(priors, posteriors, method, verbose = verbose, cleaned_parameters = insight::clean_parameters(model) ) } #' @export check_prior.stanreg <- check_prior.brmsfit #' @export check_prior.blavaan <- check_prior.brmsfit #' @keywords internal .check_prior <- function(priors, posteriors, method = "gelman", verbose = TRUE, cleaned_parameters = NULL) { # sanity check for matching parameters. Some weird priors like # rstanarm's R2 prior might cause problems if (!is.null(cleaned_parameters) && ncol(priors) != ncol(posteriors)) { ## TODO for now only fixed effects if ("Effects" %in% colnames(cleaned_parameters)) { cleaned_parameters <- cleaned_parameters[cleaned_parameters$Effects == "fixed", ] } # rename cleaned parameters, so they match name of prior parameter column cp <- cleaned_parameters$Cleaned_Parameter cp <- gsub("(.*)(\\.|\\[)\\d+(\\.|\\])", "\\1", cp) cp[cp == "Intercept"] <- "(Intercept)" cleaned_parameters$Cleaned_Parameter <- cp colnames(priors)[colnames(priors) == "Intercept"] <- "(Intercept)" # at this point, the colnames of "posteriors" should match "cp$Parameter", # while colnames of "priors" should match "cp$Cleaned_Parameter". To ensure # that ncol of priors is the same as ncol of posteriors, we now duplicate # prior columns and match them with the posteriors if (ncol(posteriors) > ncol(priors)) { matched_columns <- stats::na.omit(match(cleaned_parameters$Cleaned_Parameter, colnames(priors))) matched_column_names <- stats::na.omit(match(colnames(priors), cleaned_parameters$Cleaned_Parameter)) priors <- priors[matched_columns] } else { matched_columns <- stats::na.omit(match(colnames(priors), cleaned_parameters$Cleaned_Parameter)) matched_column_names <- stats::na.omit(match(cleaned_parameters$Cleaned_Parameter, colnames(priors))) priors <- priors[matched_columns] } colnames(priors) <- cleaned_parameters$Parameter[matched_column_names] } # still different ncols? 
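# If prior and posterior columns still cannot be matched one-to-one, fall
# back to their intersection (and warn), rather than failing outright: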
if (ncol(priors) != ncol(posteriors)) { common_columns <- intersect(colnames(priors), colnames(posteriors)) priors <- priors[common_columns] posteriors <- posteriors[common_columns] if (verbose) { warning("Parameters and priors could not be fully matched. Only returning results for parameters with matching priors.", call. = FALSE) } } # for priors whose distribution cannot be simulated, prior values are # all NA. Catch those, and warn user all_missing <- sapply(priors, function(i) { all(is.na(i)) }) if (any(all_missing) && verbose) { warning("Some priors could not be simulated.", call. = FALSE) } .gelman <- function(prior, posterior) { if (all(is.na(prior))) { "not determinable" } else if (stats::sd(posterior, na.rm = TRUE) > 0.1 * stats::sd(prior, na.rm = TRUE)) { "informative" } else { "uninformative" } } .lakeland <- function(prior, posterior) { if (all(is.na(prior))) { "not determinable" } else { hdi <- hdi(prior, ci = .95) r <- rope(posterior, ci = 1, range = c(hdi$CI_low, hdi$CI_high)) if (as.numeric(r) > 0.99) { "informative" } else { "misinformative" } } } if (method == "gelman") { result <- mapply(.gelman, priors, posteriors) } else if (method == "lakeland") { result <- mapply(.lakeland, priors, posteriors) } else { stop("method should be 'gelman' or 'lakeland'.") } data.frame( Parameter = names(posteriors), Prior_Quality = unname(result), stringsAsFactors = FALSE ) } bayestestR/R/bayesfactor_models.R0000644000175000017500000004346414101116425016730 0ustar nileshnilesh#' Bayes Factors (BF) for model comparison #' #' @description This function computes or extracts Bayes factors from fitted models. #' \cr \cr #' The `bf_*` function is an alias of the main function. #' #' @author Mattan S. Ben-Shachar #' #' @param ... Fitted models (see details), all fit on the same data, or a single #' `BFBayesFactor` object (see 'Details'). Ignored in `as.matrix()`, #' `update()`. #' @param denominator Either an integer indicating which of the models to use as #' the denominator, or a model to be used as a denominator. Ignored for #' `BFBayesFactor`. #' @param object,x A [bayesfactor_models()] object. #' @param subset Vector of model indices to keep or remove. #' @param reference Index of model to reference to, or `"top"` to #' reference to the best model, or `"bottom"` to reference to the worst #' model. #' @inheritParams hdi #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' If the passed models are supported by \pkg{insight}, the DV of all models will be tested for equality #' (else this is assumed to be true), and the models' terms will be extracted (allowing for follow-up #' analysis with `bayesfactor_inclusion`). #' #' \itemize{ #' \item For `brmsfit` or `stanreg` models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. #' \itemize{ #' \item `brmsfit` models must have been fitted with `save_pars = save_pars(all = TRUE)`. #' \item `stanreg` models must have been fitted with a defined `diagnostic_file`. #' } #' \item For `BFBayesFactor`, `bayesfactor_models()` is mostly a wrapper around `BayesFactor::extractBF()`. #' \item BIC approximations are used to compute Bayes factors for all other model types (with a BIC method). #' \itemize{ #' \item **Note** that BICs are extracted from models as-is. So if, for example, you want to compare mixed models based on ML instead of REML, you must supply models fit with ML.
#' } #' } #' In order to correctly and precisely estimate Bayes factors, a rule of thumb #' is the 4 P's: **P**roper **P**riors and **P**lentiful #' **P**osteriors. How many? The number of posterior samples needed for #' testing is substantially larger than for estimation (the default of 4000 #' samples may not be enough in many cases). A conservative rule of thumb is to #' obtain 10 times more samples than would be required for estimation #' (\cite{Gronau, Singmann, & Wagenmakers, 2017}). If fewer than 40,000 samples #' are detected, `bayesfactor_models()` gives a warning. #' \cr \cr #' See also [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @inheritSection bayesfactor_parameters Interpreting Bayes Factors #' #' @return A data frame containing the models' formulas (reconstructed fixed and #' random effects) and their `log(BF)`s, that prints nicely. #' #' @examples #' # With lm objects: #' # ---------------- #' lm1 <- lm(Sepal.Length ~ 1, data = iris) #' lm2 <- lm(Sepal.Length ~ Species, data = iris) #' lm3 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' lm4 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) #' bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1) #' bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result #' BFM <- bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result #' #' update(BFM, reference = "bottom") #' as.matrix(BFM) #' \dontrun{ #' # With lmerMod objects: #' # --------------------- #' if (require("lme4")) { #' lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) #' lmer3 <- lmer( #' Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), #' data = iris #' ) #' bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) #' bayesfactor_models(lmer1, lmer2, lmer3, denominator = lmer1) #' } #' #' # rstanarm models #' # --------------------- #' # (note that a unique diagnostic_file MUST be specified in order to work) #' if (require("rstanarm")) { #' stan_m0 <- stan_glm(Sepal.Length ~ 1, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df0.csv") #' ) #' stan_m1 <- stan_glm(Sepal.Length ~ Species, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df1.csv") #' ) #' stan_m2 <- stan_glm(Sepal.Length ~ Species + Petal.Length, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df2.csv") #' ) #' bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0) #' } #' #' #' # brms models #' # -------------------- #' # (note the save_pars MUST be set to save_pars(all = TRUE) in order to work) #' if (require("brms")) { #' brm1 <- brm(Sepal.Length ~ 1, data = iris, save_pars = save_pars(all = TRUE)) #' brm2 <- brm(Sepal.Length ~ Species, data = iris, save_pars = save_pars(all = TRUE)) #' brm3 <- brm( #' Sepal.Length ~ Species + Petal.Length, #' data = iris, #' save_pars = save_pars(all = TRUE) #' ) #' #' bayesfactor_models(brm1, brm2, brm3, denominator = 1) #' } #' #' #' # BayesFactor #' # --------------------------- #' if (require("BayesFactor")) { #' data(puzzles) #' BF <- anovaBF(RT ~ shape * color + ID, #' data = puzzles, #' whichRandom = "ID", progress = FALSE #' ) #' BF #' bayesfactor_models(BF) # basically the same #' } #' } #' @references #' \itemize{ #' \item Gronau, Q. F., Singmann, H., & Wagenmakers, E. J. (2017).
Bridgesampling: An R package for estimating normalizing constants. arXiv preprint arXiv:1710.08162. #' \item Kass, R. E., and Raftery, A. E. (1995). Bayes Factors. Journal of the American Statistical Association, 90(430), 773-795. #' \item Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, 72, 33–37. #' \item Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804. #' \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' } #' @export bayesfactor_models <- function(..., denominator = 1, verbose = TRUE) { UseMethod("bayesfactor_models") } #' @rdname bayesfactor_models #' @export bf_models <- bayesfactor_models #' @export bayesfactor_models.default <- function(..., denominator = 1, verbose = TRUE) { # Organize the models and their names mods <- list(...) denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) names(mods) <- sapply(cl$`...`, .safe_deparse) names(denominator) <- .safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) mforms <- names(mods) denominator <- attr(mods, "denominator", exact = TRUE) # Get formula / model names # supported models supported_models <- sapply(mods, insight::is_model_supported) if (all(supported_models)) { temp_forms <- sapply(mods, .find_full_formula) has_terms <- sapply(temp_forms, nchar) > 0 mforms[has_terms] <- temp_forms[has_terms] supported_models[!has_terms] <- FALSE } # Get BF mBIC <- .BIC_list(mods) mBFs <- bic_to_bf(mBIC, denominator = mBIC[denominator], log = TRUE) res <- data.frame( Model = mforms, log_BF = mBFs, stringsAsFactors = FALSE ) .bf_models_output(res, denominator = denominator, bf_method = "BIC approximation", unsupported_models = !all(supported_models), model_names = names(mods) ) } .bayesfactor_models_stan <- function(mods, denominator = 1, verbose = TRUE) { # Warn n_samps <- sapply(mods, function(x) { alg <- insight::find_algorithm(x) if (is.null(alg$iterations)) alg$iterations <- alg$sample
(alg$iterations - alg$warmup) * alg$chains }) if (any(n_samps < 4e4)) { warning( "Bayes factors might not be precise.\n", "For precise Bayes factors, it is recommended to sample at least 40,000 posterior samples.", call. = FALSE, immediate.
= TRUE ) } if (inherits(mods[[1]], "blavaan")) { res <- .bayesfactor_models_stan_SEM(mods, denominator, verbose) bf_method <- "marginal likelihoods (Laplace approximation)" unsupported_models <- TRUE } else { res <- .bayesfactor_models_stan_REG(mods, denominator, verbose) bf_method <- "marginal likelihoods (bridgesampling)" unsupported_models <- FALSE } .bf_models_output(res, denominator = denominator, bf_method = bf_method, unsupported_models = unsupported_models ) } #' @keywords internal .bayesfactor_models_stan_REG <- function(mods, denominator, verbose = TRUE) { insight::check_if_installed("bridgesampling") # Test that all is good: resps <- lapply(mods, insight::get_response) from_same_data_as_den <- sapply(resps[-denominator], identical, y = resps[[denominator]] ) if (!all(from_same_data_as_den)) { stop("Models were not computed from the same data.") } # Get BF if (verbose) { message("Computation of Bayes factors: estimating marginal likelihood, please wait...") } mML <- lapply(mods, function(x) { bridgesampling::bridge_sampler(x, silent = TRUE) }) mBFs <- sapply(mML, function(x) { bf <- bridgesampling::bf(x, mML[[denominator]], log = TRUE) bf[["bf"]] }) # Get formula mforms <- sapply(mods, .find_full_formula) res <- data.frame( Model = mforms, log_BF = mBFs, stringsAsFactors = FALSE ) } .bayesfactor_models_stan_SEM <- function(mods, denominator, verbose = TRUE) { utils::capture.output( suppressWarnings( mBFs <- sapply(mods, function(m) { blavaan::blavCompare(m, mods[[denominator]])[["bf"]][1] }) ) ) res <- data.frame( Model = names(mods), log_BF = unname(mBFs), stringsAsFactors = FALSE ) } #' @export bayesfactor_models.stanreg <- function(..., denominator = 1, verbose = TRUE) { insight::check_if_installed("rstanarm") # Organize the models and their names mods <- list(...) denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) names(mods) <- sapply(cl$`...`, .safe_deparse) names(denominator) <- .safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) denominator <- attr(mods, "denominator", exact = TRUE) .bayesfactor_models_stan(mods, denominator = denominator, verbose = verbose) } #' @export bayesfactor_models.brmsfit <- function(..., denominator = 1, verbose = TRUE) { insight::check_if_installed("brms") # Organize the models and their names mods <- list(...) denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) names(mods) <- sapply(cl$`...`, .safe_deparse) names(denominator) <- .safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) denominator <- attr(mods, "denominator", exact = TRUE) .bayesfactor_models_stan(mods, denominator = denominator, verbose = verbose) } #' @export bayesfactor_models.blavaan <- function(..., denominator = 1, verbose = TRUE) { insight::check_if_installed("blavaan") # Organize the models and their names mods <- list(...) denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) names(mods) <- sapply(cl$`...`, .safe_deparse) names(denominator) <- .safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) denominator <- attr(mods, "denominator", exact = TRUE) .bayesfactor_models_stan(mods, denominator = denominator, verbose = verbose) } #' @export bayesfactor_models.BFBayesFactor <- function(..., verbose = TRUE) { models <- c(...) 
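# BFBayesFactor objects already carry their Bayes factors:
# extractBF(models, TRUE, TRUE) returns the log BF of each numerator model
# against the shared denominator, and a leading 0 is prepended for the
# denominator itself (the log BF of a model against itself is 0).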
insight::check_if_installed("BayesFactor") mBFs <- c(0, BayesFactor::extractBF(models, TRUE, TRUE)) mforms <- sapply(c(models@denominator, models@numerator), function(x) x@shortName) if (!"BFlinearModel" %in% class(models@denominator)) { mforms <- .clean_non_linBF_mods(mforms) } else { mforms[mforms == "Intercept only"] <- "1" } res <- data.frame( Model = unname(mforms), log_BF = mBFs, stringsAsFactors = FALSE ) .bf_models_output(res, denominator = 1, bf_method = "JZS (BayesFactor)", unsupported_models = !"BFlinearModel" %in% class(models@denominator) ) } # Methods ----------------------------------------------------------------- #' @rdname bayesfactor_models #' @export update.bayesfactor_models <- function(object, subset = NULL, reference = NULL, ...) { if (!is.null(reference)) { if (reference == "top") { reference <- which.max(object$log_BF) } else if (reference == "bottom") { reference <- which.min(object$log_BF) } object$log_BF <- object$log_BF - object$log_BF[reference] attr(object, "denominator") <- reference } denominator <- attr(object, "denominator") if (!is.null(subset)) { if (all(subset < 0)) { subset <- seq_len(nrow(object))[subset] } object_subset <- object[subset, ] if (denominator %in% subset) { attr(object_subset, "denominator") <- which(denominator == subset) } else { object_subset <- rbind(object[denominator, ], object_subset) attr(object_subset, "denominator") <- 1 } object <- object_subset } object } #' @rdname bayesfactor_models #' @export as.matrix.bayesfactor_models <- function(x, ...) { out <- -outer(x$log_BF, x$log_BF, FUN = "-") rownames(out) <- colnames(out) <- x$Model # out <- exp(out) class(out) <- c("bayesfactor_models_matrix", class(out)) out } # Helpers ----------------------------------------------------------------- #' @keywords internal .cleanup_BF_models <- function(mods, denominator, cl) { if (length(mods) == 1 && inherits(mods[[1]], "list")) { mods <- mods[[1]] mod_names <- tryCatch( { sapply(cl$`...`[[1]][-1], .safe_deparse) }, error = function(e) { NULL } ) if (!is.null(mod_names) && length(mod_names) == length(mods)) { names(mods) <- mod_names } } if (!is.numeric(denominator[[1]])) { denominator_model <- which(names(mods) == names(denominator)) if (length(denominator_model) == 0) { mods <- c(mods, denominator) denominator <- length(mods) } else { denominator <- denominator_model } } else { denominator <- denominator[[1]] } attr(mods, "denominator") <- denominator mods } #' @keywords internal .bf_models_output <- function(res, denominator = 1, bf_method = "method", unsupported_models = FALSE, model_names = NULL) { attr(res, "denominator") <- denominator attr(res, "BF_method") <- bf_method attr(res, "unsupported_models") <- unsupported_models attr(res, "model_names") <- model_names class(res) <- c("bayesfactor_models", "see_bayesfactor_models", class(res)) res } #' @keywords internal .find_full_formula <- function(mod) { formulas <- insight::find_formula(mod) conditional <- random <- NULL if (!is.null(formulas$conditional)) { conditional <- as.character(formulas$conditional)[3] } if (!is.null(formulas$random)) { if (!is.list(formulas$random)) { formulas$random <- list(formulas$random) } random <- sapply(formulas$random, function(x) { paste0("(", as.character(x)[2], ")") }) } paste(c(conditional, random), collapse = " + ") } #' @keywords internal .BIC_list <- function(x) { sapply(x, function(m) { tryCatch( { bic <- stats::BIC(m, x[[1]]) bic$BIC[1] }, warning = function(w) { stop(conditionMessage(w), call. 
= FALSE) } ) }) } #' @keywords internal .clean_non_linBF_mods <- function(m_names) { tryCatch( { m_txt <- character(length = length(m_names)) ## Detect types ## is_null <- grepl("^Null", m_names) is_rho <- grepl("rho", m_names) is_mu <- grepl("mu", m_names) is_d <- grepl("d", m_names) is_p <- grepl("p", m_names) is_range <- grepl("<", m_names) ## Range Alts ## m_txt[!is_null & is_range] <- sub("^[^\\s]*\\s[^\\s]*\\s", "", m_names[!is_null & is_range]) ## Null models + Not nulls ## if (any(is_d & is_p)) { is_null <- !grepl("^Non", m_names) temp <- m_names[is_null][1] mi <- gregexpr("\\(.*\\)", temp) aa <- unlist(regmatches(temp, m = mi)) m_txt[is_null] <- sub("a=", "a = ", aa) m_txt[!is_null & !is_range] <- sub("a=", "a != ", aa) } else if (any(is_rho)) { m_txt[is_null] <- "rho = 0" m_txt[!is_null & !is_range] <- "rho != 0" m_txt <- sub("2`). See #' info on specifying correct priors for factors with more than 2 levels in #' [the #' Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @inheritParams stats::contr.treatment #' #' @details #' When `contrasts = FALSE`, the returned contrasts are equivalent to #' `contr.treatment(, contrasts = FALSE)`, as suggested by McElreath (also known #' as one-hot encoding). #' #' ## Setting Priors #' It is recommended to set 0-centered, identically scaled priors on the dummy #' coded variables produced by this method. These priors then represent the #' distance the mean of one of the levels might have from the overall mean. #' #' ### Contrasts #' This method guarantees that any set of contrasts between the *k* groups will #' have the same multivariate prior regardless of level order; however, #' different contrasts within a set of contrasts can have different univariate #' prior shapes/scales. #' \cr\cr #' For example, the contrasts `A - B` will have the same prior as `B - C`, as #' will `(A + C) - B` and `(B + A) - C`, but `A - B` and `(A + C) - B` will #' differ. #' #' #' #' @references #' - McElreath, R. (2020). Statistical rethinking: A Bayesian course with #' examples in R and Stan. CRC press. #' #' - Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). #' Default Bayes factors for ANOVA designs. *Journal of Mathematical #' Psychology*, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 #' #' @return A `matrix` with n rows and k columns, with k=n-1 if contrasts is #' `TRUE` and k=n if contrasts is `FALSE`. #' #' @aliases contr.bayes #' #' @examples #' contr.orthonorm(2) # Q_2 in Rouder et al. (2012, p. 363) #' #' contr.orthonorm(5) # equivalent to Q_5 in Rouder et al. (2012, p. 363) #' #' ## check decomposition #' Q3 <- contr.orthonorm(3) #' Q3 %*% t(Q3) ## 2/3 on diagonal and -1/3 on off-diagonal elements #' @export contr.orthonorm <- function(n, contrasts = TRUE, sparse = FALSE) { contr <- stats::contr.treatment(n, contrasts = FALSE, base = 1, sparse = sparse & !contrasts ) if (contrasts) { n <- ncol(contr) I_a <- diag(n) J_a <- matrix(1, nrow = n, ncol = n) Sigma_a <- I_a - J_a / n contr <- eigen(Sigma_a)$vectors[, seq_len(n - 1), drop = FALSE] } contr } # ---------- #' @export contr.bayes <- function(n, contrasts = TRUE) { .Deprecated(new = "contr.orthonorm", old = "contr.bayes") contr.orthonorm(n, contrasts = contrasts) } bayestestR/R/print.rope.R0000644000175000017500000000547214076521542015173 0ustar nileshnilesh#' @export print.rope <- function(x, digits = 2, ...) { orig_x <- x # If the model is multivariate, we have different ROPEs depending on # the outcome variable.
is_multivariate <- length(unique(x$Response)) > 1 if (isTRUE(is_multivariate)) { insight::print_color(sprintf( "# Proportion%s of samples inside the ROPE.\nROPE width depends on outcome variable.\n\n", ifelse(all(x$CI[1] == x$CI), "", "s") ), "blue") } else { insight::print_color(sprintf( "# Proportion%s of samples inside the ROPE [%.*f, %.*f]:\n\n", ifelse(all(x$CI[1] == x$CI), "", "s"), digits, x$ROPE_low[1], digits, x$ROPE_high[1] ), "blue") } # I think this is something nobody will understand and we'll probably forget # why we did this, so I'll comment a bit... # These are the base columns we want to print cols <- c( "Parameter", "ROPE_Percentage", "Effects", "Component", if (is_multivariate) c("ROPE_low", "ROPE_high") ) # In case we have ropes for different CIs, we also want this information # So we first check if values in the CI column differ, and if so, we also # keep this column for printing if (!all(x$CI[1] == x$CI)) { cols <- c("CI", cols) } # Either way, we need to know the different CI-values, so we can # split the data frame for printing later... ci <- unique(x$CI) # now we check which of the requested columns are actually in our data frame "x" # "x" may differ, depending on whether "rope()" was called with a model-object, # or with a simple vector. So we can't hard-code this x <- subset(x, select = intersect(cols, colnames(x))) # This is just cosmetics, to have nicer column names and values x$ROPE_Percentage <- sprintf("%.*f %%", digits, x$ROPE_Percentage * 100) colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" # Add ROPE width for multivariate models if (isTRUE(is_multivariate)) { # This is just cosmetics, to have nicer column names and values x$ROPE_low <- sprintf("[%.*f, %.*f]", digits, x$ROPE_low, digits, x$ROPE_high) colnames(x)[which(colnames(x) == "ROPE_low")] <- "ROPE width" x$ROPE_high <- NULL } # In case we have multiple CI values, we create a subset for each CI value. # Else, parameter-rows would be mixed up with both CIs, which is a bit # more difficult to read... if (length(ci) == 1) { # print complete data frame, because we have no different CI values here print_data_frame(x, digits = digits) } else { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] insight::print_color(sprintf("ROPE for the %s%% HDI:\n\n", 100 * i), "cyan") print_data_frame(xsub, digits = digits) cat("\n") } } invisible(orig_x) } bayestestR/R/rope_range.R0000644000175000017500000001205114106656140015200 0ustar nileshnilesh#' @title Find Default Equivalence (ROPE) Region Bounds #' #' @description This function attempts to automatically find suitable "default" #' values for the Region Of Practical Equivalence (ROPE). #' #' @details \cite{Kruschke (2018)} suggests that the region of practical #' equivalence could be set, by default, to a range from `-0.1` to #' `0.1` of a standardized parameter (negligible effect size #' according to Cohen, 1988). #' #' \itemize{ #' \item For **linear models (lm)**, this can be generalised to #' \ifelse{html}{\out{-0.1 * SDy, 0.1 * #' SDy}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}. #' #' \item For **logistic models**, the parameters expressed in log odds #' ratio can be converted to standardized difference through the formula #' \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in a #' range of `-0.18` to `0.18`. #' #' \item For other models with **binary outcome**, it is strongly #' recommended to manually specify the rope argument. Currently, the same #' default is applied as for logistic models.
#' #' \item For models from **count data**, the residual variance is used. #' This is a rather experimental threshold and is probably often similar to #' `-0.1, 0.1`, but should be used with care! #' #' \item For **t-tests**, the standard deviation of the response is #' used, similarly to linear models (see above). #' #' \item For **correlations**, `-0.05, 0.05` is used, i.e., half #' the value of a negligible correlation as suggested by Cohen's (1988) #' rules of thumb. #' #' \item For all other models, `-0.1, 0.1` is used to determine the #' ROPE limits, but it is strongly advised to specify it manually. #' } #' #' @param x A `stanreg`, `brmsfit` or `BFBayesFactor` object. #' @param verbose Toggle warnings. #' @inheritParams rope #' #' @examples #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' ) #' rope_range(model) #' #' model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) #' rope_range(model) #' } #' #' if (require("brms")) { #' model <- brm(mpg ~ wt + cyl, data = mtcars) #' rope_range(model) #' } #' #' if (require("BayesFactor")) { #' model <- ttestBF(mtcars[mtcars$vs == 1, "mpg"], mtcars[mtcars$vs == 0, "mpg"]) #' rope_range(model) #' #' model <- lmBF(mpg ~ vs, data = mtcars) #' rope_range(model) #' } #' } #' #' @references Kruschke, J. K. (2018). Rejecting or accepting parameter values #' in Bayesian estimation. Advances in Methods and Practices in Psychological #' Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' #' @export rope_range <- function(x, ...) { UseMethod("rope_range") } #' @rdname rope_range #' @export rope_range.default <- function(x, verbose = TRUE, ...) { response <- insight::get_response(x) information <- insight::model_info(x) if (insight::is_multivariate(x)) { ret <- mapply( function(i, j, ...) .rope_range(x, i, j), information, response, verbose, SIMPLIFY = FALSE ) return(ret) } else { .rope_range(x, information, response, verbose) } } # Exceptions -------------------------------------------------------------- #' @export rope_range.mlm <- function(x, verbose = TRUE, ...) { response <- insight::get_response(x) information <- insight::model_info(x) lapply(response, function(i) .rope_range(x, information, i, verbose)) } # helper ------------------ .rope_range <- function(x, information = NULL, response = NULL, verbose = TRUE) { # if(method != "legacy") { # message("Other ROPE range methods than 'legacy' are currently not implemented. See https://github.com/easystats/bayestestR/issues/364", call. = FALSE) # } negligible_value <- tryCatch( { if (!is.null(response) && information$link == "identity") { # Linear Models 0.1 * stats::sd(response, na.rm = TRUE) # 0.1 * stats::sigma(x) # https://github.com/easystats/bayestestR/issues/364 } else if (information$link == "logit") { # Logistic Models (any) # Sigma==pi / sqrt(3) 0.1 * pi / sqrt(3) } else if (information$link == "probit") { # Probit models # Sigma==1 0.1 * 1 } else if (information$is_correlation) { # Correlations # https://github.com/easystats/bayestestR/issues/121 0.05 } else if (information$is_count) { # Not sure about this sig <- stats::sigma(x) if (is.null(sig) || length(sig) == 0 || is.na(sig)) stop() 0.1 * sig } else { # Default stop() } }, error = function(e) { if (isTRUE(verbose)) { warning("Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.", call. 
= FALSE) } 0.1 } ) c(-1, 1) * negligible_value } bayestestR/R/zzz.R0000644000175000017500000000023414030365364013715 0ustar nileshnilesh.onAttach <- function(libname, pkgname) { if (format(Sys.time(), "%m%d") == "0504") { packageStartupMessage("May the fourth be with you!") } } bayestestR/R/describe_prior.R0000644000175000017500000000672314076521541016065 0ustar nileshnilesh#' Describe Priors #' #' Returns a summary of the priors used in the model. #' #' @param model A Bayesian model. #' @param ... Currently not used. #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' describe_prior(bf) #' } #' } #' @export describe_prior <- function(model, ...) { UseMethod("describe_prior") } #' @export describe_prior.brmsfit <- function(model, effects = c("fixed", "random", "all"), component = c( "conditional", "zi", "zero_inflated", "all", "location", "distributional", "auxiliary" ), parameters = NULL, ...) { .describe_prior(model, parameters = parameters) } # Internal ---------------------------------------------------------------- #' @keywords internal .describe_prior <- function(model, parameters = NULL, ...) { priors <- insight::get_priors(model, ...) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) # If the prior scale has been adjusted, it is the actual scale that was used. if ("Prior_Adjusted_Scale" %in% names(priors)) { priors$Prior_Scale[!is.na(priors$Prior_Adjusted_Scale)] <- priors$Prior_Adjusted_Scale[!is.na(priors$Prior_Adjusted_Scale)] priors$Prior_Adjusted_Scale <- NULL } if ("Prior_Response" %in% names(priors)) { names(priors)[names(priors) == "Prior_Response"] <- "Response" } # make sure parameter names match between prior output and model cp <- insight::clean_parameters(model) ## TODO for now, only fixed effects if ("Effects" %in% names(cp)) { cp <- cp[cp$Effects == "fixed", ] } if (!is.null(parameters) && !all(priors$Parameter %in% parameters)) { cp$Cleaned_Parameter <- gsub("(.*)(\\.|\\[)\\d+(\\.|\\])", "\\1", cp$Cleaned_Parameter) cp$Cleaned_Parameter[cp$Cleaned_Parameter == "Intercept"] <- "(Intercept)" colnames(priors)[1] <- "Cleaned_Parameter" out <- merge(cp, priors, by = "Cleaned_Parameter", all = TRUE) out <- out[!duplicated(out$Parameter), ] priors <- out[intersect(colnames(out), c("Parameter", "Prior_Distribution", "Prior_df", "Prior_Location", "Prior_Scale", "Response"))] } priors } #' @export describe_prior.stanreg <- .describe_prior #' @export describe_prior.bcplm <- .describe_prior #' @export describe_prior.blavaan <- .describe_prior #' @export describe_prior.mcmc.list <- function(model, ...) { NULL } #' @export describe_prior.BGGM <- function(model, ...) { NULL } #' @export describe_prior.bamlss <- function(model, ...) { NULL } #' @export describe_prior.BFBayesFactor <- function(model, ...) 
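# For BFBayesFactor objects, prior information is read directly off the
# object via insight::get_priors(); no parameter-name cleaning is needed.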
{ priors <- insight::get_priors(model) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) priors } bayestestR/R/diagnostic_posterior.R0000644000175000017500000002744114114077153017322 0ustar nileshnilesh#' Posteriors Sampling Diagnostic #' #' Extract diagnostic metrics (Effective Sample Size (`ESS`), `Rhat` and Monte #' Carlo Standard Error `MCSE`). #' #' @param posteriors A stanreg or brms model. #' @param diagnostic Diagnostic metrics to compute. Character (vector) or list #' with one or more of these options: `"ESS"`, `"Rhat"`, `"MCSE"` or `"all"`. #' #' @details #' **Effective Sample Size (ESS)** should be as large as possible, although for #' most applications, an effective sample size greater than 1000 is sufficient #' for stable estimates (Bürkner, 2017). The ESS corresponds to the number of #' independent samples with the same estimation power as the N autocorrelated #' samples. It is a measure of \dQuote{how much independent information #' there is in autocorrelated chains} (\cite{Kruschke 2015, p182-3}). #' \cr \cr #' **Rhat** should be as close to 1 as possible. It should not be larger than 1.1 #' (\cite{Gelman and Rubin, 1992}) or 1.01 (\cite{Vehtari et al., 2019}). The #' split Rhat statistic quantifies the consistency of an ensemble of Markov #' chains. #' \cr \cr #' **Monte Carlo Standard Error (MCSE)** is another measure of accuracy of the #' chains. It is defined as the standard deviation of the chains divided by their #' effective sample size (the formula for `mcse()` is from Kruschke 2015, p. #' 187). The MCSE \dQuote{provides a quantitative suggestion of how big the #' estimation noise is}. #' #' #' @examples #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm", quietly = TRUE)) { #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' diagnostic_posterior(model) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms", quietly = TRUE)) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' diagnostic_posterior(model) #' } #' } #' @references #' \itemize{ #' \item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472. #' \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., \& Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. #' \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' } #' @export diagnostic_posterior <- function(posteriors, diagnostic = c("ESS", "Rhat"), ...) { UseMethod("diagnostic_posterior") } #' @export diagnostic_posterior.numeric <- function(posteriors, diagnostic = c("ESS", "Rhat"), ...) { stop("`diagnostic_posterior` only works with rstanarm or brms models.") } #' @export diagnostic_posterior.data.frame <- diagnostic_posterior.numeric #' @export diagnostic_posterior.BFBayesFactor <- diagnostic_posterior.numeric #' @inheritParams insight::get_parameters.BFBayesFactor #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.stanreg <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...)
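# As documented above, MCSE = SD(posterior) / sqrt(ESS). A by-hand check for
# a single parameter might look like this (a sketch, assuming a fitted
# `model` whose first parameter matches the first row of effective_sample()):
#   draws <- insight::get_parameters(model)[[1]]
#   sd(draws) / sqrt(effective_sample(model)$ESS[1])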
{ # Find parameters effects <- match.arg(effects) component <- match.arg(component) params <- insight::find_parameters( posteriors, effects = effects, component = component, parameters = parameters, flatten = TRUE ) # If no diagnostic if (is.null(diagnostic)) { return(data.frame("Parameter" = params)) } diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") } else { diagnostic <- c(diagnostic) if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices and rename diagnostic_df <- as.data.frame(posteriors$stan_summary) diagnostic_df$Parameter <- row.names(diagnostic_df) if ("n_eff" %in% names(diagnostic_df)) { diagnostic_df$ESS <- diagnostic_df$n_eff } # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posteriors, effects = "all") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.stanmvreg <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), parameters = NULL, ...) { # Find parameters effects <- match.arg(effects) all_params <- insight::find_parameters( posteriors, effects = effects, parameters = parameters, flatten = FALSE ) params <- unlist(lapply(names(all_params), function(i) { all_params[[i]]$sigma <- NULL unlist(all_params[[i]]) })) # If no diagnostic if (is.null(diagnostic)) { return(data.frame("Parameter" = params)) } diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") } else { diagnostic <- c(diagnostic) if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices and rename diagnostic_df <- as.data.frame(posteriors$stan_summary) diagnostic_df$Parameter <- row.names(diagnostic_df) if ("n_eff" %in% names(diagnostic_df)) { diagnostic_df$ESS <- diagnostic_df$n_eff } # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posteriors, effects = effects) diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] diagnostic_df$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", diagnostic_df$Parameter) for (i in unique(diagnostic_df$Response)) { diagnostic_df$Parameter <- gsub(sprintf("%s|", i), "", diagnostic_df$Parameter, fixed = TRUE) } # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.brmsfit <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = 
c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # Find parameters effects <- match.arg(effects) component <- match.arg(component) params <- insight::find_parameters(posteriors, effects = effects, component = component, parameters = parameters, flatten = TRUE ) # If no diagnostic if (is.null(diagnostic)) { return(data.frame("Parameter" = params)) } # Get diagnostic diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") # Add MCSE } else { if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } insight::check_if_installed("rstan") # Get indices and rename diagnostic_df <- as.data.frame(rstan::summary(posteriors$fit)$summary) diagnostic_df$Parameter <- row.names(diagnostic_df) diagnostic_df$ESS <- diagnostic_df$n_eff # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posteriors, effects = "all", component = "all") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @export diagnostic_posterior.stanfit <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), parameters = NULL, ...) { # Find parameters effects <- match.arg(effects) params <- insight::find_parameters(posteriors, effects = effects, parameters = parameters, flatten = TRUE) # If no diagnostic if (is.null(diagnostic)) { return(data.frame("Parameter" = params)) } # Get diagnostic diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") } insight::check_if_installed("rstan") all_params <- insight::find_parameters(posteriors, effects = effects, flatten = TRUE ) diagnostic_df <- data.frame( Parameter = all_params, stringsAsFactors = FALSE ) if ("ESS" %in% diagnostic) { diagnostic_df$ESS <- effective_sample(posteriors, effects = effects)$ESS } if ("MCSE" %in% diagnostic) { diagnostic_df$MCSE <- mcse(posteriors, effects = effects)$MCSE } if ("Rhat" %in% diagnostic) { s <- as.data.frame(rstan::summary(posteriors)$summary) diagnostic_df$Rhat <- s[rownames(s) %in% all_params, ]$Rhat } # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @export diagnostic_posterior.blavaan <- function(posteriors, diagnostic = "all", ...) 
{ # Find parameters params <- suppressWarnings(insight::find_parameters(posteriors, flatten = TRUE)) out <- data.frame("Parameter" = params) # If no diagnostic if (is.null(diagnostic)) { return(out) } diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") } else { diagnostic <- c(diagnostic) if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices if ("Rhat" %in% diagnostic) { insight::check_if_installed("blavaan") Rhat <- blavaan::blavInspect(posteriors, what = "psrf") Rhat <- data.frame( Parameter = colnames(insight::get_parameters(posteriors)), Rhat = Rhat ) out <- merge(out, Rhat, by = "Parameter", all = TRUE) } if ("ESS" %in% diagnostic) { ESS <- effective_sample(posteriors) out <- merge(out, ESS, by = "Parameter", all = TRUE) } if ("MCSE" %in% diagnostic) { MCSE <- mcse(posteriors) out <- merge(out, MCSE, by = "Parameter", all = TRUE) } unique(out) } bayestestR/R/describe_posterior.R0000644000175000017500000011534314125235747016764 0ustar nileshnilesh#' Describe Posterior Distributions #' #' Compute indices relevant to describe and characterize the posterior distributions. #' #' @param posteriors A vector, data frame or model of posterior draws. #' @param ci_method The type of index used for Credible Interval. Can be #' `"HDI"` (default, see [bayestestR::hdi()]), `"ETI"` #' (see [bayestestR::eti()]), `"BCI"` (see #' [bayestestR::bci()]) or `"SI"` (see [bayestestR::si()]). #' @param test The indices of effect existence to compute. Character (vector) or #' list with one or more of these options: `"p_direction"` (or `"pd"`), #' `"rope"`, `"p_map"`, `"equivalence_test"` (or `"equitest"`), #' `"bayesfactor"` (or `"bf"`) or `"all"` to compute all tests. #' For each "test", the corresponding \pkg{bayestestR} function is called #' (e.g. [bayestestR::rope()] or [bayestestR::p_direction()]) and its results #' included in the summary output. #' @param rope_range ROPE's lower and higher bounds. Should be a list of two #' values (e.g., `c(-0.1, 0.1)`) or `"default"`. If `"default"`, #' the bounds are set to `x +- 0.1*SD(response)`. #' @param rope_ci The Credible Interval (CI) probability, corresponding to the #' proportion of HDI, to use for the percentage in ROPE. #' @param keep_iterations If `TRUE`, will keep all iterations (draws) of #' bootstrapped or Bayesian models. They will be added as additional columns #' named `iter_1, iter_2, ...`. You can reshape them to a long format by #' running [bayestestR::reshape_iterations()]. #' #' @inheritParams point_estimate #' @inheritParams ci #' @inheritParams si #' #' @details #' One or more components of point estimates (like posterior mean or median), #' intervals and tests can be omitted from the summary output by setting the #' related argument to `NULL`. For example, `test = NULL` and `centrality = #' NULL` would only return the HDI (or CI). 
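#'
#' For example, a call like the following (a minimal sketch of the behavior
#' described above) returns only the interval columns:
#' ```
#' x <- rnorm(1000)
#' describe_posterior(x, centrality = NULL, test = NULL)
#' ```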
#' #' @references #' \itemize{ #' \item [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) #' \item [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) #' \item [Bayes factors](https://easystats.github.io/bayestestR/articles/bayes_factors.html) #' } #' #' @examples #' library(bayestestR) #' #' if (require("logspline")) { #' x <- rnorm(1000) #' describe_posterior(x) #' describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(x, ci = c(0.80, 0.90)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' describe_posterior(df) #' describe_posterior(df, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(df, ci = c(0.80, 0.90)) #' #' df <- data.frame(replicate(4, rnorm(20))) #' head(reshape_iterations(describe_posterior(df, keep_iterations = TRUE))) #' } #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm") && require("emmeans")) { #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' describe_posterior(model) #' describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(model, ci = c(0.80, 0.90)) #' #' # emmeans estimates #' # ----------------------------------------------- #' describe_posterior(emtrends(model, ~1, "wt")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' describe_posterior(model) #' describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(model, ci = c(0.80, 0.90)) #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' describe_posterior(bf) #' describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(bf, ci = c(0.80, 0.90)) #' } #' } #' @export describe_posterior <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, ...) { UseMethod("describe_posterior") } #' @keywords internal .describe_posterior <- function(x, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, ...) { if (is.null(x)) { warning("Could not extract posterior samples.", call. = FALSE) return(NULL) } # Arguments fixes if (!is.null(centrality) && length(centrality) == 1 && (centrality == "none" || centrality == FALSE)) centrality <- NULL if (!is.null(ci) && length(ci) == 1 && (is.na(ci) || ci == FALSE)) ci <- NULL if (!is.null(test) && length(test) == 1 && (test == "none" || test == FALSE)) test <- NULL # Point-estimates if (!is.null(centrality)) { estimates <- point_estimate(x, centrality = centrality, dispersion = dispersion, ...) 
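# Each block below computes one summary component (point estimate, interval,
# tests). Components are merged later by Parameter (plus Effects/Component/
# Response where present), so each must expose a Parameter column -- plain
# vectors get the placeholder "Posterior".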
if (!"Parameter" %in% names(estimates)) { estimates <- cbind(data.frame("Parameter" = "Posterior"), estimates) } } else { estimates <- data.frame("Parameter" = NA) } # Uncertainty if (!is.null(ci)) { ci_method <- match.arg(tolower(ci_method), c("hdi", "quantile", "ci", "eti", "si", "bci", "bcai")) if (ci_method == "si") { uncertainty <- ci(x, BF = BF, method = ci_method, prior = bf_prior, ...) } else { uncertainty <- ci(x, ci = ci, method = ci_method, ...) } if (!"Parameter" %in% names(uncertainty)) { uncertainty <- cbind(data.frame("Parameter" = "Posterior"), uncertainty) } } else { uncertainty <- data.frame("Parameter" = NA) } # Effect Existence if (!is.null(test)) { test <- .check_test_values(test) if ("all" %in% test) { test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf") } ## TODO no BF for arm::sim if (inherits(x, c("sim", "sim.merMod", "mcmc", "stanfit"))) { test <- setdiff(test, "bf") } ## TODO enable once "rope()" works for multi-response models # no ROPE for multi-response models if (insight::is_multivariate(x)) { test <- setdiff(test, c("rope", "p_rope")) warning("Multivariate response models are not yet supported for tests 'rope' and 'p_rope'.", call. = FALSE) } # MAP-based p-value if (any(c("p_map", "p_pointnull") %in% test)) { test_pmap <- p_map(x, ...) if (!is.data.frame(test_pmap)) test_pmap <- data.frame("Parameter" = "Posterior", "p_map" = test_pmap) } else { test_pmap <- data.frame("Parameter" = NA) } # Probability of direction if (any(c("pd", "p_direction", "pdir", "mpe") %in% test)) { test_pd <- p_direction(x, ...) if (!is.data.frame(test_pd)) test_pd <- data.frame("Parameter" = "Posterior", "pd" = test_pd) } else { test_pd <- data.frame("Parameter" = NA) } # Probability of rope if (any(c("p_rope") %in% test)) { test_prope <- p_rope(x, range = rope_range, ...) if (!"Parameter" %in% names(test_prope)) { test_prope <- cbind(data.frame("Parameter" = "Posterior"), test_prope) } } else { test_prope <- data.frame("Parameter" = NA) } # Probability of significance if (any(c("ps", "p_sig", "p_significance") %in% test)) { test_psig <- p_significance(x, threshold = rope_range, ...) if (!is.data.frame(test_psig)) test_psig <- data.frame("Parameter" = "Posterior", "ps" = test_psig) } else { test_psig <- data.frame("Parameter" = NA) } # ROPE if (any(c("rope") %in% test)) { test_rope <- rope(x, range = rope_range, ci = rope_ci, ...) if (!"Parameter" %in% names(test_rope)) { test_rope <- cbind(data.frame("Parameter" = "Posterior"), test_rope) } names(test_rope)[names(test_rope) == "CI"] <- "ROPE_CI" } else { test_rope <- data.frame("Parameter" = NA) } # Equivalence test if (any(c("equivalence", "equivalence_test", "equitest") %in% test)) { if (any(c("rope") %in% test)) { equi_warnings <- FALSE } else { equi_warnings <- TRUE } test_equi <- equivalence_test(x, range = rope_range, ci = rope_ci, verbose = equi_warnings, ... ) test_equi$Cleaned_Parameter <- NULL if (!"Parameter" %in% names(test_equi)) { test_equi <- cbind(data.frame("Parameter" = "Posterior"), test_equi) } names(test_equi)[names(test_equi) == "CI"] <- "ROPE_CI" test_rope <- merge(test_rope, test_equi, all = TRUE) test_rope <- test_rope[!names(test_rope) %in% c("HDI_low", "HDI_high")] } # Bayes Factors if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) { test_bf <- bayesfactor_parameters(x, prior = bf_prior, ...) 
if (!"Parameter" %in% names(test_bf)) { test_bf <- cbind(data.frame("Parameter" = "Posterior"), test_bf) } } else { test_bf <- data.frame("Parameter" = NA) } } else { test_pd <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) test_rope <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) test_prope <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) test_psig <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) test_bf <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) test_pmap <- data.frame( "Parameter" = NA, "Effects" = NA, "Component" = NA, "Response" = NA ) } # for data frames or numeric, and even for some models, we don't # have the "Effects" or "Component" column for all data frames. # To make "merge()" work, we add those columns to all data frames, # filled with NA, and remove the columns later if necessary estimates <- .add_effects_component_column(estimates) uncertainty <- .add_effects_component_column(uncertainty) test_pmap <- .add_effects_component_column(test_pmap) test_pd <- .add_effects_component_column(test_pd) test_prope <- .add_effects_component_column(test_prope) test_psig <- .add_effects_component_column(test_psig) test_rope <- .add_effects_component_column(test_rope) test_bf <- .add_effects_component_column(test_bf) # at least one "valid" data frame needs a row id, to restore # row-order after merging if (!all(is.na(estimates$Parameter))) { estimates$.rowid <- 1:nrow(estimates) } else if (!all(is.na(test_pmap$Parameter))) { test_pmap$.rowid <- 1:nrow(test_pmap) } else if (!all(is.na(test_pd$Parameter))) { test_pd$.rowid <- 1:nrow(test_pd) } else if (!all(is.na(test_prope$Parameter))) { test_prope$.rowid <- 1:nrow(test_prope) } else if (!all(is.na(test_psig$Parameter))) { test_psig$.rowid <- 1:nrow(test_psig) } else if (!all(is.na(test_rope$Parameter))) { test_rope$.rowid <- 1:nrow(test_rope) } else if (!all(is.na(test_bf$Parameter))) { test_bf$.rowid <- 1:nrow(test_bf) } else { estimates$.rowid <- 1:nrow(estimates) } # remove duplicated columns if (all(c("rope", "p_rope") %in% test)) { test_prope$ROPE_low <- NULL test_prope$ROPE_high <- NULL } # merge all data frames merge_by <- c("Parameter", "Effects", "Component", "Response") # merge_by <- intersect(merge_by, colnames(estimates)) out <- merge(estimates, uncertainty, by = merge_by, all = TRUE) out <- merge(out, test_pmap, by = merge_by, all = TRUE) out <- merge(out, test_pd, by = merge_by, all = TRUE) out <- merge(out, test_prope, by = merge_by, all = TRUE) out <- merge(out, test_psig, by = merge_by, all = TRUE) out <- merge(out, test_rope, by = merge_by, all = TRUE) out <- merge(out, test_bf, by = merge_by, all = TRUE) out <- out[!is.na(out$Parameter), ] # check which columns can be removed at the end. 
In any case, we don't # need .rowid in the returned data frame, and when the Effects or Component # columns consist only of missing values, we remove those columns as well remove_columns <- ".rowid" if (all(is.na(out$Effects)) || length(unique(out$Effects)) < 2) remove_columns <- c(remove_columns, "Effects") if (all(is.na(out$Component)) || length(unique(out$Component)) < 2) remove_columns <- c(remove_columns, "Component") if (all(is.na(out$Response)) || length(unique(out$Response)) < 2) remove_columns <- c(remove_columns, "Response") # Restore column order out <- .remove_column(out[order(out$.rowid), ], remove_columns) # Add iterations if (keep_iterations == TRUE) { row_order <- out$Parameter if (insight::is_model(x)) { iter <- as.data.frame(t(insight::get_parameters(x, ...))) } else { iter <- as.data.frame(t(as.data.frame(x, ...))) } names(iter) <- paste0("iter_", 1:ncol(iter)) iter$Parameter <- row.names(iter) out <- merge(out, iter, all.x = TRUE, by = "Parameter") out <- out[match(row_order, out$Parameter), ] row.names(out) <- NULL } # Prepare output attr(out, "ci_method") <- ci_method out } #' @keywords internal .add_effects_component_column <- function(x) { if (!"Effects" %in% names(x)) x <- cbind(x, data.frame("Effects" = NA)) if (!"Component" %in% names(x)) x <- cbind(x, data.frame("Component" = NA)) if (!"Response" %in% names(x)) x <- cbind(x, data.frame("Response" = NA)) x } # Models based on simple data frame of posteriors --------------------- #' @rdname describe_posterior #' @param bf_prior Distribution representing a prior for the computation of #' Bayes factors / SI. Used if the input is a posterior, otherwise (in the #' case of models) ignored. #' @export describe_posterior.numeric <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, ...) { out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, ... ) class(out) <- unique(c("describe_posterior", "see_describe_posterior", class(out))) out } #' @export describe_posterior.double <- describe_posterior.numeric #' @export describe_posterior.data.frame <- describe_posterior.numeric #' @export describe_posterior.sim.merMod <- describe_posterior.numeric #' @export describe_posterior.sim <- describe_posterior.numeric #' @export describe_posterior.bayesQR <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, parameters = NULL, ...) { out <- .describe_posterior( insight::get_parameters(posteriors), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, ...
) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.blrm <- describe_posterior.bayesQR #' @export describe_posterior.mcmc <- describe_posterior.bayesQR #' @export describe_posterior.mcmc.list <- describe_posterior.bayesQR #' @export describe_posterior.BGGM <- describe_posterior.bayesQR # easystats methods ------------------------ #' @export describe_posterior.effectsize_std_params <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, ...) { class(posteriors) <- "data.frame" no_unique <- sapply(posteriors, function(col) { length(unique(col)) == 1 }) if (any(no_unique)) { no_unique <- which(no_unique) out <- describe_posterior.data.frame( posteriors[, -no_unique], centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, ... ) out_int <- data.frame(Parameter = colnames(posteriors)[no_unique]) col_diff <- setdiff(colnames(out), colnames(out_int)) out_int[, col_diff] <- NA out <- rbind(out_int, out) out <- out[order(match(out$Parameter, colnames(posteriors))), ] return(out) } describe_posterior.data.frame( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, ... ) } #' @export describe_posterior.get_predicted <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = NULL, ...) { if ("iterations" %in% names(attributes(posteriors))) { describe_posterior( as.data.frame(t(attributes(posteriors)$iterations)), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, ... ) } else { stop("No iterations present in the output.") } } # emmeans --------------------------- #' @export describe_posterior.emmGrid <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, ...) { if (any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) || "si" %in% tolower(ci_method)) { samps <- .clean_priors_and_posteriors(posteriors, bf_prior) bf_prior <- samps$prior posteriors <- samps$posterior } else { posteriors <- insight::get_parameters(posteriors) } out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, ... ) row.names(out) <- NULL # Reset row names class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) out } #' @export describe_posterior.emm_list <- describe_posterior.emmGrid # Stan ------------------------------ #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @param priors Add the prior used for each parameter. 
#' @rdname describe_posterior #' @export describe_posterior.stanreg <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, BF = 1, ...) { if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) | "si" %in% tolower(ci_method)) & is.null(bf_prior)) { bf_prior <- unupdate(posteriors) } effects <- match.arg(effects) component <- match.arg(component) out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, effects = effects, component = component, parameters = parameters, ... ) diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, component = component, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } out <- .add_clean_parameters_attribute(out, posteriors) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @param priors Add the prior used for each parameter. #' @rdname describe_posterior #' @export describe_posterior.stanmvreg <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = "p_direction", rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, effects = effects, parameters = parameters, ... ) if (is.null(out$Response)) { out$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", out$Parameter) } diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = c("Parameter", "Response"), all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, parameters = NULL, ...) 
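# For multivariate-response models, the prior parameter names carry a
# "response|parameter" prefix; the next step keeps only the part after the
# "|" so the priors can be merged with the results by Parameter and Response.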
priors_data$Parameter <- gsub("^(.*)\\|(.*)", replacement = "\\2", priors_data$Parameter) out <- .merge_and_sort(out, priors_data, by = c("Parameter", "Response"), all = TRUE) } out <- .add_clean_parameters_attribute(out, posteriors) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @export describe_posterior.stanfit <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, diagnostic = c("ESS", "Rhat"), effects = c("fixed", "random", "all"), parameters = NULL, priors = FALSE, ...) { effects <- match.arg(effects) out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = effects, parameters = parameters, ... ) diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams describe_posterior.stanreg #' @rdname describe_posterior #' @export describe_posterior.brmsfit <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all", "location", "distributional", "auxiliary"), parameters = NULL, BF = 1, priors = FALSE, ...) { effects <- match.arg(effects) component <- match.arg(component) if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) | "si" %in% tolower(ci_method)) & is.null(bf_prior)) { bf_prior <- unupdate(posteriors) } out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, effects = effects, component = component, parameters = parameters, ... ) if (!is.null(diagnostic)) { diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, component = component, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, parameters = out$Parameter, ...) 
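# append the prior summaries to the results, matched by parameter name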
out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } out <- .add_clean_parameters_attribute(out, posteriors) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.blavaan <- describe_posterior.stanfit # other models -------------------------------- #' @inheritParams describe_posterior.stanreg #' @rdname describe_posterior #' @export describe_posterior.MCMCglmm <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, diagnostic = "ESS", parameters = NULL, ...) { out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, ... ) if (!is.null(diagnostic) && diagnostic == "ESS") { diagnostic <- effective_sample(posteriors, effects = "fixed", parameters = parameters, ...) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } out } #' @export describe_posterior.bcplm <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, priors = TRUE, parameters = NULL, ...) { out <- .describe_posterior( insight::get_parameters(posteriors), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, ... ) if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.bamlss <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, component = c("all", "conditional", "location"), parameters = NULL, ...) { component <- match.arg(component) out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, component = component, parameters = parameters, ... ) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } # BayesFactor -------------------- #' @rdname describe_posterior #' @export describe_posterior.BFBayesFactor <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("p_direction", "rope", "bf"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, priors = TRUE, verbose = TRUE, ...) 
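# Illustrative usage (a hedged sketch, not part of the package's documented
# examples; assumes the 'BayesFactor' package is installed):
#   bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1))
#   describe_posterior(bf, test = c("p_direction", "rope", "bf"))
# The method below first strips the "bf" entry from `test`, computes the
# remaining indices on the posterior draws, and then re-attaches the
# model-comparison Bayes factor afterwards.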
{ # Match test args to catch BFs if (!is.null(test)) { test <- .check_test_values(test) if ("all" %in% test) { test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf") } } # Remove BF from list if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) { test <- test[!test %in% c("bf", "bayesfactor", "bayes_factor")] if (length(test) == 0L) test <- NULL compute_bf <- TRUE } else { compute_bf <- FALSE } draws <- insight::get_parameters(posteriors) if (all(rope_range == "default")) { rope_range <- rope_range(posteriors, verbose = verbose) } # Describe posterior out <- .describe_posterior( draws, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, ... ) if (is.null(out)) { return(NULL) } # Compute and read BF a posteriori if (compute_bf) { tryCatch( { out$log_BF <- as.data.frame(bayesfactor_models(posteriors[1], ...))[-1, ]$log_BF out$BF <- exp(out$log_BF) }, error = function(e) { NULL } ) } # Add priors if (priors) { priors_data <- describe_prior(posteriors, ...) out <- .merge_and_sort(out, priors_data, by = intersect(names(out), names(priors_data)), all = TRUE) } attr(out, "ci_method") <- ci_method attr(out, "object_name") <- .safe_deparse(substitute(posteriors)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } # Helpers ----------------------------------------------------------------- #' @keywords internal .check_test_values <- function(test) { match.arg(tolower(test), c( "pd", "p_direction", "pdir", "mpe", "ps", "psig", "p_significance", "p_rope", "rope", "equivalence", "equivalence_test", "equitest", "bf", "bayesfactor", "bayes_factor", "p_map", "all" ), several.ok = TRUE) } bayestestR/R/rope.R0000644000175000017500000004535314106656140014037 0ustar nileshnilesh#' Region of Practical Equivalence (ROPE) #' #' Compute the proportion of the HDI (defaults to the `95%` HDI) of a posterior distribution that lies within a region of practical equivalence. #' #' @param x Vector representing a posterior distribution. Can also be a `stanreg` or `brmsfit` model. #' @param range ROPE's lower and upper bounds. Should be `"default"` or, #' depending on the number of outcome variables, a vector or a list. In models with one response, #' `range` should be a vector of length two (e.g., `c(-0.1, 0.1)`). In #' multivariate models, `range` should be a list with a numeric vector for #' each response variable. Vector names should correspond to the names of the response #' variables. If `"default"` and input is a vector, the range is set to `c(-0.1, #' 0.1)`. If `"default"` and input is a Bayesian model, #' [`rope_range()`][rope_range] is used. #' @param ci The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE. #' @param ci_method The type of interval to use to quantify the percentage in ROPE. Can be 'HDI' (default) or 'ETI'. See [ci()]. #' #' @inheritParams hdi #' #' @details #' \subsection{ROPE}{ #' Statistically, the probability of a posterior distribution being #' different from 0 does not make much sense (the probability of a single value #' null hypothesis in a continuous distribution is 0). Therefore, the idea #' underlying ROPE is to let the user define an area around the null value #' enclosing values that are *equivalent to the null* value for practical #' purposes (\cite{Kruschke 2010, 2011, 2014}).
#' \cr \cr #' Kruschke (2018) suggests that such a null value could be set, by default, #' to the -0.1 to 0.1 range of a standardized parameter (negligible effect #' size according to Cohen, 1988). This could be generalized: For instance, #' for linear models, the ROPE could be set as `0 +/- .1 * sd(y)`. #' This ROPE range can be automatically computed for models using the #' [rope_range] function. #' \cr \cr #' Kruschke (2010, 2011, 2014) suggests using the proportion of the `95%` #' (or `89%`, considered more stable) [HDI][hdi] that falls within the #' ROPE as an index for "null-hypothesis" testing (as understood under the #' Bayesian framework, see [`equivalence_test()`][equivalence_test]). #' } #' \subsection{Sensitivity to parameter's scale}{ #' It is important to consider the unit (i.e., the scale) of the predictors #' when using an index based on the ROPE, as the correct interpretation of the #' ROPE as representing a region of practical equivalence to zero is dependent #' on the scale of the predictors. Indeed, the percentage in ROPE depends on #' the unit of its parameter. In other words, as the ROPE represents a fixed #' portion of the response's scale, its proximity to a coefficient depends #' on the scale of the coefficient itself. #' } #' \subsection{Multicollinearity: Non-independent covariates}{ #' When parameters show strong correlations, i.e., when covariates are not #' independent, the joint parameter distributions may shift towards or #' away from the ROPE. Collinearity invalidates ROPE and hypothesis #' testing based on univariate marginals, as the probabilities are conditional #' on independence. Most problematic are parameters that only have partial #' overlap with the ROPE region. In case of collinearity, the (joint) distributions #' of these parameters may yield either an increased or a decreased proportion inside the ROPE, which #' means that inferences based on `rope()` are inappropriate #' (\cite{Kruschke 2014, 340f}). #' \cr \cr #' `rope()` performs a simple check for pairwise correlations between #' parameters, but as there can be collinearity between more than two variables, #' a first step to check the assumptions of this hypothesis testing is to look #' at different pair plots. An even more sophisticated check is the projection #' predictive variable selection (\cite{Piironen and Vehtari 2017}). #' } #' \subsection{Strengths and Limitations}{ #' **Strengths:** Provides information related to the practical relevance of the effects. #' \cr \cr #' **Limitations:** A ROPE range needs to be arbitrarily defined. Sensitive to the scale (the unit) of the predictors. Not sensitive to highly significant effects. #' } #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references \itemize{ #' \item Cohen, J. (1988). Statistical power analysis for the behavioural sciences. #' \item Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. #' \item Kruschke, J. K. (2011). Bayesian assessment of null values via parameter estimation and model comparison. Perspectives on Psychological Science, 6(3), 299-312. \doi{10.1177/1745691611406925}. #' \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' \item Kruschke, J. K. (2018).
Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. \doi{10.1007/s11222-016-9649-y} #' } #' #' @examples #' library(bayestestR) #' #' rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 1), ci = c(.90, .95)) #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' rope(model) #' rope(model, ci = c(.90, .95)) #' #' library(emmeans) #' rope(emtrends(model, ~1, "wt"), ci = c(.90, .95)) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' rope(model) #' rope(model, ci = c(.90, .95)) #' #' library(brms) #' model <- brms::brm(brms::mvbind(mpg, disp) ~ wt + cyl, data = mtcars) #' rope(model) #' rope(model, ci = c(.90, .95)) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' rope(bf) #' rope(bf, ci = c(.90, .95)) #' } #' @export rope <- function(x, ...) { UseMethod("rope") } #' @method as.double rope #' @export as.double.rope <- function(x, ...) { x$ROPE_Percentage } #' @rdname rope #' @export rope.default <- function(x, ...) { NULL } #' @rdname rope #' @export rope.numeric <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { if (all(range == "default")) { range <- c(-0.1, 0.1) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } rope_values <- lapply(ci, function(i) { .rope(x, range = range, ci = i, ci_method = ci_method, verbose = verbose) }) # "do.call(rbind)" does not bind attribute values together # so we need to capture the information about HDI separately out <- do.call(rbind, rope_values) if (nrow(out) > 1) { out$ROPE_Percentage <- as.numeric(out$ROPE_Percentage) } # Attributes hdi_area <- cbind(CI = ci, data.frame(do.call(rbind, lapply(rope_values, attr, "HDI_area")))) names(hdi_area) <- c("CI", "CI_low", "CI_high") attr(out, "HDI_area") <- hdi_area attr(out, "data") <- x class(out) <- unique(c("rope", "see_rope", class(out))) out } #' @rdname rope #' @export rope.data.frame <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { out <- .prepare_rope_df(x, range, ci, ci_method, verbose) HDI_area_attributes <- .compact_list(out$HDI_area) dat <- data.frame( Parameter = rep(names(HDI_area_attributes), each = length(ci)), out$tmp, stringsAsFactors = FALSE ) row.names(dat) <- NULL attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- .safe_deparse(substitute(x)) class(dat) <- c("rope", "see_rope", "data.frame") dat } #' @rdname rope #' @export rope.emmGrid <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- rope(xdf, range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) 
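# The emmeans grid was converted to a data frame of posterior draws above
# (via insight::get_parameters()), so the data.frame method does the actual
# computation; here we only restore the original object name for printing.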
attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @export rope.emm_list <- rope.emmGrid #' @rdname rope #' @export rope.BFBayesFactor <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } out <- rope(insight::get_parameters(x), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export rope.bamlss <- rope.BFBayesFactor #' @rdname rope #' @export rope.MCMCglmm <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { nF <- x$Fixed$nfl out <- rope(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export rope.mcmc <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { out <- rope(as.data.frame(x), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- NULL attr(out, "data") <- .safe_deparse(substitute(x)) out } #' @export rope.bcplm <- function(x, range = "default", ci = 0.95, ci_method = "HDI", verbose = TRUE, ...) { out <- rope(insight::get_parameters(x), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- NULL attr(out, "data") <- .safe_deparse(substitute(x)) out } #' @export rope.bayesQR <- rope.bcplm #' @export rope.blrm <- rope.bcplm #' @export rope.BGGM <- rope.bcplm #' @export rope.mcmc.list <- rope.bcplm #' @keywords internal .rope <- function(x, range = c(-0.1, 0.1), ci = 0.95, ci_method = "HDI", verbose = TRUE) { ci_bounds <- ci(x, ci = ci, method = ci_method, verbose = verbose) if (anyNA(ci_bounds)) { rope_percentage <- NA } else { HDI_area <- x[x >= ci_bounds$CI_low & x <= ci_bounds$CI_high] area_within <- HDI_area[HDI_area >= min(range) & HDI_area <= max(range)] rope_percentage <- length(area_within) / length(HDI_area) } rope <- data.frame( "CI" = ci, "ROPE_low" = range[1], "ROPE_high" = range[2], "ROPE_Percentage" = rope_percentage ) attr(rope, "HDI_area") <- c(ci_bounds$CI_low, ci_bounds$CI_high) attr(rope, "CI_bounds") <- c(ci_bounds$CI_low, ci_bounds$CI_high) class(rope) <- unique(c("rope", "see_rope", class(rope))) rope } #' @rdname rope #' @export rope.stanreg <- function(x, range = "default", ci = 0.95, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } # check for possible collinearity that might bias ROPE if (verbose && !inherits(x, "blavaan")) .check_multicollinearity(x, "rope") rope_data <- rope( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ... 
) out <- .prepare_output(rope_data, insight::clean_parameters(x), inherits(x, "stanmvreg")) attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(rope_data) out } #' @export rope.stanfit <- rope.stanreg #' @export rope.blavaan <- rope.stanreg #' @rdname rope #' @export rope.brmsfit <- function(x, range = "default", ci = 0.95, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) # check range argument if (all(range == "default")) { range <- rope_range(x, verbose = verbose) # we expect a list with named vectors (length two) in the multivariate case. # Names state the response variable. } else if (insight::is_multivariate(x)) { if ( !is.list(range) || length(range) < length(insight::find_response(x)) || !all(names(range) %in% insight::find_response(x)) ) { stop("With a multivariate model, `range` should be 'default' or a list of named numeric vectors with length 2.") } } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } # check for possible collinearity that might bias ROPE and print a warning if (verbose) .check_multicollinearity(x, "rope") # calculate ROPE if (insight::is_multivariate(x)) { dv <- insight::find_response(x) # ROPE range / width differs between response variables. Thus, ROPE is # calculated for every response variable on its own. rope_data <- lapply( dv, function(dv_item) { ret <- rope( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), range = range[[dv_item]], ci = ci, ci_method = ci_method, verbose = verbose, ... ) # It's a waste of performance to calculate ROPE for all parameters # with the ROPE width of a specific response variable and to throw # away the unwanted results. However, performance impact should not be # too high and this way it is much easier to handle the `parameters` # argument. ret[grepl(paste0("(.*)", dv_item), ret$Parameter), ] } ) rope_data <- do.call(rbind, rope_data) out <- .prepare_output(rope_data, insight::clean_parameters(x), is_brms_mv = TRUE) } else { rope_data <- rope( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ... ) out <- .prepare_output(rope_data, insight::clean_parameters(x)) } attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(rope_data) out } #' @export rope.sim.merMod <- function(x, range = "default", ci = 0.95, ci_method = "HDI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...)
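# For simulations from arm::sim() of a merMod object, fixed and random
# effects are processed separately below and the per-group results are
# row-bound afterwards.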
{ effects <- match.arg(effects) if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } list <- lapply(c("fixed", "random"), function(.x) { parms <- insight::get_parameters(x, effects = .x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, verbose) tmp <- getropedata$tmp HDI_area <- getropedata$HDI_area if (!.is_empty_object(tmp)) { tmp <- .clean_up_tmp_stanreg( tmp, group = .x, cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Group"), parms = names(parms) ) if (!.is_empty_object(HDI_area)) { attr(tmp, "HDI_area") <- HDI_area } } else { tmp <- NULL } tmp }) dat <- do.call(rbind, args = c(.compact_list(list), make.row.names = FALSE)) dat <- switch(effects, fixed = .select_rows(dat, "Group", "fixed"), random = .select_rows(dat, "Group", "random"), dat ) if (all(dat$Group == dat$Group[1])) { dat <- .remove_column(dat, "Group") } HDI_area_attributes <- lapply(.compact_list(list), attr, "HDI_area") if (effects != "all") { HDI_area_attributes <- HDI_area_attributes[[1]] } else { names(HDI_area_attributes) <- c("fixed", "random") } attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @export rope.sim <- function(x, range = "default", ci = 0.95, ci_method = "HDI", parameters = NULL, verbose = TRUE, ...) { if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } parms <- insight::get_parameters(x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, verbose) dat <- getropedata$tmp HDI_area <- getropedata$HDI_area if (!.is_empty_object(dat)) { dat <- .clean_up_tmp_stanreg( dat, group = "fixed", cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage"), parms = names(parms) ) if (!.is_empty_object(HDI_area)) { attr(dat, "HDI_area") <- HDI_area } } else { dat <- NULL } attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @keywords internal .prepare_rope_df <- function(parms, range, ci, ci_method, verbose) { tmp <- sapply( parms, rope, range = range, ci = ci, ci_method = ci_method, verbose = verbose, simplify = FALSE ) HDI_area <- lapply(tmp, function(.x) { attr(.x, "HDI_area") }) # HDI_area <- lapply(HDI_area, function(.x) { # dat <- cbind(CI = ci, data.frame(do.call(rbind, .x))) # colnames(dat) <- c("CI", "HDI_low", "HDI_high") # dat # }) list( tmp = do.call(rbind, tmp), HDI_area = HDI_area ) } bayestestR/R/diagnostic_draws.R0000644000175000017500000000305314101116425016405 0ustar nileshnilesh#' Diagnostic values for each iteration #' #' Returns diagnostic values for each iteration, such as the accumulated log-posterior, the average Metropolis acceptance rate, the number of divergent transitions, and whether an iteration saturated the maximum treedepth rather than terminated its evolution normally. #' @inheritParams diagnostic_posterior #' #' @examples #' \dontrun{ #' set.seed(333) #' #' if (require("brms", quietly = TRUE)) { #' model <- brm(mpg ~ wt * cyl * vs, #' data = mtcars, #' iter = 100, control = list(adapt_delta = 0.80), #' refresh = 0 #' ) #' diagnostic_draws(model) #' } #' } #' #' @export diagnostic_draws <- function(posteriors, ...) { UseMethod("diagnostic_draws") } #' @export diagnostic_draws.brmsfit <- function(posteriors, ...)
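# brms::nuts_params() returns the sampler diagnostics in long format
# (Chain, Iteration, Parameter, Value); the method below reshapes them to
# wide format so that each diagnostic becomes its own column, and then
# merges in the accumulated log-posterior from brms::log_posterior().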
{ insight::check_if_installed("brms") data <- brms::nuts_params(posteriors) data$idvar <- paste0(data$Chain, "_", data$Iteration) out <- stats::reshape( data, v.names = "Value", idvar = "idvar", timevar = "Parameter", direction = "wide" ) out$idvar <- NULL out <- merge(out, brms::log_posterior(posteriors), by = c("Chain", "Iteration"), sort = FALSE) # Rename names(out)[names(out) == "Value.accept_stat__"] <- "Acceptance_Rate" names(out)[names(out) == "Value.treedepth__"] <- "Tree_Depth" names(out)[names(out) == "Value.stepsize__"] <- "Step_Size" names(out)[names(out) == "Value.divergent__"] <- "Divergent" names(out)[names(out) == "Value.n_leapfrog__"] <- "n_Leapfrog" names(out)[names(out) == "Value.energy__"] <- "Energy" names(out)[names(out) == "Value"] <- "LogPosterior" out } bayestestR/R/backports.R0000644000175000017500000000025214001445230015045 0ustar nileshnileshisTRUE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && x } isFALSE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && !x } bayestestR/R/sexit_thresholds.R0000644000175000017500000001152314101116425016455 0ustar nileshnilesh#' @title Find Effect Size Thresholds #' #' @description This function attempts to automatically find suitable default values for a "significant" (i.e., non-negligible) and "large" effect. This is to be used with care, and the chosen thresholds should always be explicitly reported and justified. See the details section in [`sexit()`][sexit] for more information. #' #' #' @inheritParams rope #' #' @examples #' sexit_thresholds(rnorm(1000)) #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' ) #' sexit_thresholds(model) #' #' model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) #' sexit_thresholds(model) #' } #' #' if (require("brms")) { #' model <- brm(mpg ~ wt + cyl, data = mtcars) #' sexit_thresholds(model) #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' sexit_thresholds(bf) #' } #' } #' @references Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' #' @export sexit_thresholds <- function(x, ...) { UseMethod("sexit_thresholds") } #' @export sexit_thresholds.brmsfit <- function(x, ...) { response <- insight::get_response(x) information <- insight::model_info(x) if (insight::is_multivariate(x)) { mapply(function(i, j) .sexit_thresholds(x, i, j), information, response) } else { .sexit_thresholds(x, information, response) } } #' @export sexit_thresholds.stanreg <- sexit_thresholds.brmsfit #' @export sexit_thresholds.BFBayesFactor <- function(x, ...)
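# For BayesFactor models whose numerator inherits from "BFlinearModel", the
# default thresholds are rescaled by the standard deviation of the response
# (the `fac` multiplier below); for all other objects the unscaled defaults
# are returned.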
{ fac <- 1 if (inherits(x@numerator[[1]], "BFlinearModel")) { response <- tryCatch( { insight::get_response(x) }, error = function(e) { NULL } ) if (!is.null(response)) { fac <- stats::sd(response, na.rm = TRUE) } } fac * .sexit_thresholds(x) } #' @export sexit_thresholds.lm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.glm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.merMod <- sexit_thresholds.brmsfit #' @export sexit_thresholds.glmmTMB <- sexit_thresholds.brmsfit #' @export sexit_thresholds.mixed <- sexit_thresholds.brmsfit #' @export sexit_thresholds.MixMod <- sexit_thresholds.brmsfit #' @export sexit_thresholds.wbm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.feis <- sexit_thresholds.brmsfit #' @export sexit_thresholds.gee <- sexit_thresholds.brmsfit #' @export sexit_thresholds.geeglm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.lme <- sexit_thresholds.brmsfit #' @export sexit_thresholds.felm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.fixest <- sexit_thresholds.brmsfit #' @export sexit_thresholds.gls <- sexit_thresholds.brmsfit #' @export sexit_thresholds.hurdle <- sexit_thresholds.brmsfit #' @export sexit_thresholds.zeroinfl <- sexit_thresholds.brmsfit #' @export sexit_thresholds.bayesQR <- sexit_thresholds.brmsfit #' @export sexit_thresholds.default <- function(x, ...) { .sexit_thresholds(x) } #' @export sexit_thresholds.mlm <- function(x, ...) { response <- insight::get_response(x) information <- insight::model_info(x) lapply(response, function(i) .sexit_thresholds(x, information, i)) } # helper ------------------ .sexit_thresholds <- function(x, information = NULL, response = NULL) { if (is.null(information) && is.null(response)) { norm <- 1 } else { norm <- tryCatch( { # Linear Models if (information$is_linear) { stats::sd(response, na.rm = TRUE) # Logistic Regression Models } else if (information$is_binomial) { pi / sqrt(3) # Count Models } else if (information$is_count) { sig <- stats::sigma(x) if (!is.null(sig) && length(sig) > 0 && !is.na(sig)) { sig } else { 1 } # T-tests } else if (information$is_ttest) { if ("BFBayesFactor" %in% class(x)) { stats::sd(x@data[, 1]) } else { warning("Could not estimate good thresholds, using default values.", call. = FALSE) 1 } # Correlations } else if (information$is_correlation) { # https://github.com/easystats/bayestestR/issues/121 1 # Default } else { 1 } }, error = function(e) { warning("Could not estimate good thresholds, using default values.", call. = FALSE) 1 } ) } c(0.05, 0.3) * norm } bayestestR/R/print.R0000644000175000017500000001545014101116425014211 0ustar nileshnilesh#' @export print.describe_posterior <- function(x, digits = 2, caption = "Summary of Posterior Distribution", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.point_estimate <- function(x, digits = 2, caption = "Point Estimate", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_direction <- function(x, digits = 2, caption = "Probability of Direction", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.map_estimate <- function(x, digits = 2, caption = "MAP Estimate", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_rope <- function(x, digits = 2, ...) 
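# The caption echoes the ROPE bounds taken from the first row (printed as,
# e.g., "Proportion of samples inside the ROPE [-0.10, 0.10]"); the bound
# columns are then dropped before the table is printed.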
{ caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL .print_default( x = x, digits = digits, caption = caption, ci_string = "ROPE", ... ) } #' @export print.p_significance <- function(x, digits = 2, ...) { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { .print_default( x = x, digits = digits, caption = caption, ci_string = "HDI", ... ) } #' @export print.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...) { .print_default( x = x, digits = digits, caption = caption, ci_string = "ETI", ... ) } #' @export print.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_default( x = x, digits = digits, caption = caption, ci_string = "SI", ... ) } # special handling for bayes factors ------------------ #' @export print.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) { show_names <- show_names & !attr(x, "unsupported_models") .print_bf_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = c("llr"), ... ) } #' @export print.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_default( x = x, digits = digits, log = log, caption = caption, ... ) } #' @export print.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_default( x = x, digits = digits, log = log, caption = caption, ... ) } #' @export print.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "text", ... ) cat(insight::export_table(formatted_table, format = "text")) invisible(x) } # util --------------------- .print_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "text", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # check if we have a 1x1 data frame (i.e. a numeric input) if (is.data.frame(formatted_table) && nrow(formatted_table) == 1 && ncol(formatted_table) == 1) { # print for numeric caption <- attr(formatted_table, "table_caption") # if we have no useful column name and a caption, use caption if (!is.null(caption) && !grepl(paste0(ci_string, "$"), colnames(formatted_table))) { cat(paste0(caption, ": ")) } else { cat(paste0(colnames(formatted_table), ": ")) } cat(formatted_table[1, 1]) } else { # print for data frame cat(insight::export_table( formatted_table, caption = caption )) } invisible(x) } .print_bf_default <- function(x, digits = 3, log = FALSE, caption = NULL, align = NULL, ...) { # format data frame and columns formatted_table <- format( x, digits = digits, log = log, format = "text", caption = caption, ... 
# pass show_names ) cat(insight::export_table( formatted_table, sep = " ", header = NULL, format = "text", align = align )) invisible(x) } bayestestR/R/simulate_data.R0000644000175000017500000001104114057264527015702 0ustar nileshnilesh#' Data Simulation #' #' Simulate data with specific characteristics. #' #' @param n The number of observations to be generated. #' @param r A value or vector corresponding to the desired correlation #' coefficients. #' @param d A value or vector corresponding to the desired difference between #' the groups. #' @param mean A value or vector corresponding to the mean of the variables. #' @param sd A value or vector corresponding to the SD of the variables. #' @param names A character vector of desired variable names. #' @param ... Arguments passed to or from other methods. #' @examples #' #' # Correlation -------------------------------- #' data <- simulate_correlation(r = 0.5) #' plot(data$V1, data$V2) #' cor.test(data$V1, data$V2) #' summary(lm(V2 ~ V1, data = data)) #' #' # Specify mean and SD #' data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) #' cor.test(data$V1, data$V2) #' round(c(mean(data$V1), sd(data$V1)), 1) #' round(c(mean(data$V2), sd(data$V2)), 1) #' summary(lm(V2 ~ V1, data = data)) #' #' # Generate multiple variables #' cor_matrix <- matrix(c( #' 1.0, 0.2, 0.4, #' 0.2, 1.0, 0.3, #' 0.4, 0.3, 1.0 #' ), #' nrow = 3 #' ) #' #' data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2")) #' cor(data) #' summary(lm(y ~ x1, data = data)) #' #' # t-test -------------------------------- #' data <- simulate_ttest(n = 30, d = 0.3) #' plot(data$V1, data$V0) #' round(c(mean(data$V1), sd(data$V1)), 1) #' diff(t.test(data$V1 ~ data$V0)$estimate) #' summary(lm(V1 ~ V0, data = data)) #' summary(glm(V0 ~ V1, data = data, family = "binomial")) #' #' # Difference -------------------------------- #' data <- simulate_difference(n = 30, d = 0.3) #' plot(data$V1, data$V0) #' round(c(mean(data$V1), sd(data$V1)), 1) #' diff(t.test(data$V1 ~ data$V0)$estimate) #' summary(lm(V1 ~ V0, data = data)) #' summary(glm(V0 ~ V1, data = data, family = "binomial")) #' @export simulate_correlation <- function(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...) { insight::check_if_installed("MASS") # Define matrix if (is.matrix(r)) { if (isSymmetric(r)) { if (any(abs(r) > 1)) { stop("'r' should only contain values between -1 and 1.") } else { sigma <- r } } else { stop("'r' should be a symmetric matrix (relative to the diagonal).") } } else if (length(r) == 1) { if (abs(r) > 1) { stop("'r' should only contain values between -1 and 1.") } else { sigma <- matrix(c(1, r, r, 1), nrow = 2) } } else { stop("'r' should be a value (e.g., r = 0.5) or a square matrix.") } # Get data data <- MASS::mvrnorm( n = n, mu = rep_len(0, ncol(sigma)), # Means of variables Sigma = sigma, empirical = TRUE ) # Adjust scale if (any(sd != 1)) { data <- t(t(data) * rep_len(sd, ncol(sigma))) } # Adjust mean if (any(mean != 0)) { data <- t(t(data) + rep_len(mean, ncol(sigma))) } data <- as.data.frame(data) # Rename if (!is.null(names)) { if (length(names) == ncol(data)) { names(data) <- names } } data } #' @rdname simulate_correlation #' @export simulate_ttest <- function(n = 100, d = 0.5, names = NULL, ...)
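# The binary group below is generated from a logistic model: z = d * x,
# pr = 1 / (1 + exp(-z)), y ~ Bernoulli(pr). A quick sanity check (a sketch,
# not run as part of the package):
#   dat <- simulate_ttest(n = 1000, d = 0.5)
#   coef(glm(V0 ~ V1, data = dat, family = "binomial"))["V1"] # roughly 0.5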
{ x <- distribution_normal(n, 0, 1) # Continuous variables z <- 0 + d * x # Linear combination pr <- 1 / (1 + exp(-z)) # Pass it through an inverse logit function y <- distribution_binomial(n, 1, pr, random = 3) # Bernoulli response variable data <- data.frame(y = as.factor(y), x = x) names(data) <- paste0("V", 0:(ncol(data) - 1)) if (!is.null(names)) { if (length(names) == ncol(data)) { names(data) <- names } } data } #' @rdname simulate_correlation #' @export simulate_difference <- function(n = 100, d = 0.5, names = NULL, ...) { x <- distribution_normal(round(n / 2), -d / 2, 1) y <- distribution_normal(round(n / 2), d / 2, 1) data <- data.frame( y = as.factor(rep(c(0, 1), each = round(n / 2))), x = c(x, y) ) names(data) <- paste0("V", 0:(ncol(data) - 1)) if (!is.null(names)) { if (length(names) == ncol(data)) { names(data) <- names } } data } # Simulate regression: see https://stats.stackexchange.com/questions/363623/simulate-regression-with-specified-standardized-coefficients/508107#508107 bayestestR/R/z_old_print_methods.R0000644000175000017500000002257314030221266017130 0ustar nileshnilesh# old print methods -------------------- # print.describe_posterior <- function(x, digits = 3, ...) { # print_data_frame(format(x, digits = digits, ...), digits = digits, ...) # invisible(x) # } # print.bayestestR_hdi <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else { # .print_hdi(x, digits, title = "Highest Density Interval", ci_string = "HDI", ...) # } # invisible(orig_x) # } # print.bayestestR_eti <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else { # .print_hdi(x, digits, title = "Equal-Tailed Interval", ci_string = "ETI", ...) # } # invisible(orig_x) # } # print.bayestestR_si <- function(x, digits = 2, ...) { # orig_x <- x # .print_hdi(x, digits, title = "Support Interval", ci_string = "SI", ...) # invisible(orig_x) # } # print.bayestestR_ci <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else { # .print_hdi(x, digits, title = "Credible Interval", ci_string = "CI", ...) # } # invisible(orig_x) # } # format.describe_posterior <- function(x, digits = 3, ...) 
{ # if ("data_plot" %in% class(x)) { # return(as.data.frame(x), digits = digits) # } # # if ("CI" %in% colnames(x)) { # is_SI <- !is.null(attributes(x)$ci_method) && tolower(attributes(x)$ci_method) == "si" # # ci <- unique(x$CI) * 100 # if (length(ci) > 1) { # x$CI <- insight::format_ci(x$CI_low, x$CI_high, ci = x$CI, digits = digits, width = "auto") # # if (is_SI) { # x$CI <- paste0("BF = ", gsub("% CI", " SI", x$CI)) # colnames(x)[colnames(x) == "CI"] <- "SI" # } # } else { # x$CI <- insight::format_ci(x$CI_low, x$CI_high, ci = NULL, digits = digits, width = "auto") # # if (is.null(ci)) { # if (is_SI) colnames(x)[colnames(x) == "CI"] <- "SI" # } else { # if (is_SI) { # colnames(x)[colnames(x) == "CI"] <- sprintf("BF = %.5g SI", ci) # } else { # colnames(x)[colnames(x) == "CI"] <- sprintf("%.5g%% CI", ci) # } # } # } # # x <- .remove_column(x, c("CI_low", "CI_high")) # } # # # if ("ROPE_CI" %in% colnames(x)) { # rci <- unique(x$ROPE_CI) # if (length(rci) > 1) { # x$ROPE_CI <- insight::format_ci(x$ROPE_low, x$ROPE_high, ci = rci / 100, digits = digits, width = "auto") # } else { # x$ROPE_CI <- insight::format_ci(x$ROPE_low, x$ROPE_high, ci = NULL, digits = digits, width = "auto") # colnames(x)[colnames(x) == "ROPE_CI"] <- sprintf("%.5g%% ROPE", rci) # } # x <- .remove_column(x, c("ROPE_low", "ROPE_high")) # } # # x <- insight::format_table(x, digits = digits, ...) # x # } # .print_hdi <- function(x, digits, title, ci_string, ...) { # insight::print_color(sprintf( # "# %s%s\n\n", # title, # ifelse(all(x$CI[1] == x$CI), "", "s") # ), "blue") # # ci <- unique(x$CI) # x$HDI <- insight::format_ci(x$CI_low, x$CI_high, ci = NULL, digits = digits, width = "auto", missing = "NA") # # if (length(ci) == 1) { # xsub <- .remove_column(x, c("CI", "CI_low", "CI_high")) # colnames(xsub)[ncol(xsub)] <- sprintf("%.5g%% %s", 100 * ci, ci_string) # if (inherits(x, "bayestestR_si")) colnames(xsub)[ncol(xsub)] <- sprintf("BF = %.5g %s", ci, ci_string) # print_data_frame(xsub, digits = digits) # } else { # for (i in ci) { # xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] # xsub <- .remove_column(xsub, c("CI", "CI_low", "CI_high")) # colnames(xsub)[ncol(xsub)] <- sprintf("%.5g%% %s", 100 * i, ci_string) # if (inherits(x, "bayestestR_si")) colnames(xsub)[ncol(xsub)] <- sprintf("BF = %.5g %s", i, ci_string) # print_data_frame(xsub, digits = digits) # cat("\n") # } # } # } # print.point_estimate <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else if ("data.frame" %in% class(x)) { # insight::print_color("# Point Estimates\n\n", "blue") # print_data_frame(x, digits = digits) # } else { # print(unclass(x)) # } # invisible(orig_x) # } # print.p_direction <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else if ("data.frame" %in% class(x)) { # .print_pd(x, digits, ...) # } else { # cat(sprintf("pd = %s%%", insight::format_value(x * 100, digits = digits))) # } # invisible(orig_x) # } # # .print_pd <- function(x, digits, ...) { # insight::print_color("# Probability of Direction (pd)\n\n", "blue") # x$Parameter <- as.character(x$Parameter) # x$pd <- sprintf("%s%%", insight::format_value(x$pd * 100, digits = digits)) # print_data_frame(x, digits = digits) # } # print.p_map <- function(x, digits = 3, ...) 
{ # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else if ("data.frame" %in% class(x)) { # insight::print_color("# MAP-based p-value\n\n", "blue") # print_data_frame(x, digits = digits) # } else { # cat(sprintf("p (MAP) = %.*f", digits, x)) # } # invisible(orig_x) # } # print.p_significance <- function(x, digits = 2, ...) { # orig_x <- x # if ("data_plot" %in% class(x)) { # print(as.data.frame(x)) # } else if ("data.frame" %in% class(x)) { # .print_ps(x, digits, ...) # } else { # cat(sprintf( # "ps [%s] = %s%%", # insight::format_value(attributes(x)$threshold, digits = digits), # insight::format_value(x * 100, digits = digits) # )) # } # invisible(orig_x) # } # # .print_ps <- function(x, digits, ...) { # insight::print_color(sprintf( # "# Probability of Significance (ps [%s])\n\n", # insight::format_value(attributes(x)$threshold, digits = digits) # ), "blue") # x$Parameter <- as.character(x$Parameter) # x$ps <- sprintf("%s%%", insight::format_value(x$ps * 100, digits = digits)) # print_data_frame(x, digits = digits) # } # print.map_estimate <- function(x, ...) { # orig_x <- x # if (inherits(x, "data.frame")) { # print.data.frame(x) # } else { # cat(sprintf("MAP = %.2f", x)) # } # invisible(orig_x) # } # print.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # null <- attr(x, "hypothesis") # direction <- attr(x, "direction") # # # format table # BFE <- as.data.frame(x) # if (log) { # BFE$BF <- log(BFE$BF) # } # BFE$BF <- insight::format_value(BFE$BF, digits = digits, missing = "NA") # # caption <- c(sprintf( # "# Bayes Factor (%s)\n\n", # if (length(null) == 1) "Savage-Dickey density ratio" else "Null-Interval" # ), "blue") # # footer <- list( # c("* Evidence Against The Null: "), # c(sprintf("[%s]", paste0(round(null, digits), collapse = ", ")), "cyan"), # if (direction) c("\n* Direction: "), # if (direction < 0) c("Left-Sided test", "cyan"), # if (direction > 0) c("Right-Sided test", "cyan"), # if (log) c("\n\nBayes Factors are on the log-scale.\n", "red") # ) # # { # insight::print_color(caption[1], caption[2]) # print_data_frame(BFE, digits = digits) # lapply(footer, function(txt) { # if (length(txt) == 2) { # insight::print_color(txt[1], txt[2]) # } else { # cat(txt) # } # NULL # }) # } # # # invisible(x) # } # # print.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, ...) { # priorOdds <- attr(x, "priorOdds") # matched <- attr(x, "matched") # # # format table # BFE <- as.data.frame(x) # if (log) { # BFE$BF <- log(BFE$BF) # } # BFE$BF <- insight::format_value(BFE$BF, digits = digits, missing = "NA") # BFE <- cbind(rownames(BFE), BFE) # colnames(BFE) <- c("", "Pr(prior)", "Pr(posterior)", "Inclusion BF") # # # # footer # footer <- list( # c("\n* Compared among: "), # c(if (matched) "matched models only" else "all models", "cyan"), # c("\n* Priors odds: "), # c(if (!is.null(priorOdds)) "custom" else "uniform-equal", "cyan"), # if (log) c("\n\nBayes Factors are on the log-scale.", "red") # ) # # cat(insight::export_table( # BFE, # digits = digits, sep = " ", header = NULL, # caption = c("# Inclusion Bayes Factors (Model Averaged)", "blue"), # footer = footer # )) # # invisible(x) # } # print.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, ...) 
{ # BFE <- as.data.frame(x) # # # Format # if (log) { # BFE$BF <- log(BFE$BF) # } # BFE$BF <- insight::format_value(BFE$BF, digits = digits, missing = "NA") # colnames(BFE) <- c("Hypothesis", "P(Prior)", "P(Posterior)", "BF") # # # footer # footer <- list( # c("\n* Bayes factors for the restricted model vs. the un-restricted model.\n"), # if (log) c("\nBayes Factors are on the log-scale.\n", "red") # ) # # # cat(insight::export_table( # BFE, # digits = digits, sep = " ", header = NULL, # caption = c("# Bayes Factor (Order-Restriction)", "blue"), # footer = footer # )) # # # invisible(x) # } bayestestR/R/mediation.R0000644000175000017500000003252614101116425015031 0ustar nileshnilesh#' @title Summary of Bayesian multivariate-response mediation-models #' @name mediation #' #' @description `mediation()` is a short summary for multivariate-response #' mediation-models, i.e. this function computes average direct and average #' causal mediation effects of multivariate response models. #' #' @param model A `brmsfit` or `stanmvreg` object. #' @param treatment Character, name of the treatment variable (or direct effect) #' in a (multivariate response) mediator-model. If missing, `mediation()` #' tries to find the treatment variable automatically, however, this may fail. #' @param mediator Character, name of the mediator variable in a (multivariate #' response) mediator-model. If missing, `mediation()` tries to find the #' mediator variable automatically, however, this may fail. #' @param response A named character vector, indicating the names of the response #' variables to be used for the mediation analysis. Usually can be `NULL`, #' in which case these variables are retrieved automatically. If not `NULL`, #' names should match the names of the model formulas, #' `names(insight::find_response(model, combine = TRUE))`. This can be #' useful if, for instance, the mediator variable used as predictor has a different #' name from the mediator variable used as response. This might occur when the #' mediator is transformed in one model, but used "as is" as response variable #' in the other model. Example: The mediator `m` is used as response variable, #' but the centered version `m_center` is used as mediator variable. The #' second response variable (for the treatment model, with the mediator as #' additional predictor), `y`, is not transformed. Then we could use #' `response` like this: `mediation(model, response = c(m = "m_center", y = "y"))`. #' @param ... Not used. #' @inheritParams ci #' @inheritParams describe_posterior #' #' @return A data frame with direct, indirect, mediator and #' total effect of a multivariate-response mediation-model, as well as the #' proportion mediated. The effect sizes are median values of the posterior #' samples (use `centrality` for other centrality indices). #' #' @details `mediation()` returns a data frame with information on the #' *direct effect* (median value of posterior samples from `treatment` #' of the outcome model), *mediator effect* (median value of posterior #' samples from `mediator` of the outcome model), *indirect effect* #' (median value of the multiplication of the posterior samples from #' `mediator` of the outcome model and the posterior samples from #' `treatment` of the mediation model) and the total effect (median #' value of sums of posterior samples used for the direct and indirect #' effect). The *proportion mediated* is the indirect effect divided #' by the total effect.
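#' \cr \cr
#' As a minimal numeric sketch of this arithmetic (made-up posterior draws,
#' not output from a fitted model):
#' ```
#' b_treat_m <- rnorm(1000, 0.5, 0.05) # treatment -> mediator
#' b_m_y <- rnorm(1000, 0.3, 0.05) # mediator -> outcome (mediator effect)
#' b_treat_y <- rnorm(1000, 0.4, 0.05) # treatment -> outcome (direct effect)
#' indirect <- b_treat_m * b_m_y # indirect effect
#' total <- indirect + b_treat_y # total effect
#' median(indirect) / median(total) # proportion mediated
#' ```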
#' \cr \cr #' For all values, the `95%` credible intervals are calculated by default. #' Use `ci` to calculate a different interval. #' \cr \cr #' The arguments `treatment` and `mediator` do not necessarily #' need to be specified. If missing, `mediation()` tries to find the #' treatment and mediator variable automatically. If this does not work, #' specify these variables. #' \cr \cr #' The direct effect is also called *average direct effect* (ADE), #' the indirect effect is also called *average causal mediation effects* #' (ACME). See also \cite{Tingley et al. 2014} and \cite{Imai et al. 2010}. #' #' @note There is an `as.data.frame()` method that returns the posterior #' samples of the effects, which can be used for further processing with the #' different functions of the \pkg{bayestestR} package. #' #' @references #' \itemize{ #' \item Imai, K., Keele, L. and Tingley, D. (2010) A General Approach to Causal #' Mediation Analysis, Psychological Methods, Vol. 15, No. 4 (December), pp. #' 309-334. #' #' \item Tingley, D., Yamamoto, T., Hirose, K., Imai, K. and Keele, L. (2014). #' mediation: R package for Causal Mediation Analysis, Journal of Statistical #' Software, Vol. 59, No. 5, pp. 1-38. #' } #' #' @seealso The \pkg{mediation} package for a causal mediation analysis in #' the frequentist framework. #' #' @examples #' \dontrun{ #' library(mediation) #' library(brms) #' library(rstanarm) #' #' # load sample data #' data(jobs) #' set.seed(123) #' #' # linear models, for mediation analysis #' b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) #' b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) #' # mediation analysis, for comparison with Stan models #' m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek") #' #' # Fit Bayesian mediation model in brms #' f1 <- bf(job_seek ~ treat + econ_hard + sex + age) #' f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age) #' m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4, refresh = 0) #' #' # Fit Bayesian mediation model in rstanarm #' m3 <- stan_mvmer( #' list( #' job_seek ~ treat + econ_hard + sex + age + (1 | occp), #' depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp) #' ), #' data = jobs, #' cores = 4, #' refresh = 0 #' ) #' #' summary(m1) #' mediation(m2, centrality = "mean", ci = .95) #' mediation(m3, centrality = "mean", ci = .95) #' } #' @export mediation <- function(model, ...) { UseMethod("mediation") } #' @rdname mediation #' @export mediation.brmsfit <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ...) { .mediation( model = model, treatment = treatment, mediator = mediator, response = response, centrality = centrality, ci = ci, method = method, pattern = "b_%s_%s", ... ) } #' @rdname mediation #' @export mediation.stanmvreg <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ...) { .mediation( model = model, treatment = treatment, mediator = mediator, response = response, centrality = centrality, ci = ci, method = method, pattern = "%s|%s", ... ) } # workhorse --------------------------------- .mediation <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", pattern = "b_%s_%s", ...) { # only one interval width is supported if (length(ci) > 1) ci <- ci[1] # check for binary response.
In this case, the user should rescale the variables modelinfo <- insight::model_info(model) if (any(sapply(modelinfo, function(i) i$is_binomial, simplify = TRUE))) { message("One of mediator or outcome is binary, so direct and indirect effects may be on different scales. Consider rescaling model predictors, e.g. with `effectsize::standardize()`.") } # model responses if (is.null(response)) { response <- insight::find_response(model, combine = TRUE) } fix_mediator <- FALSE # find mediator, if not specified if (missing(mediator)) { predictors <- insight::find_predictors(model, flatten = TRUE) mediator <- predictors[predictors %in% response] fix_mediator <- TRUE } # find treatment, if not specified if (missing(treatment)) { predictors <- lapply( insight::find_predictors(model), function(.f) .f$conditional ) treatment <- predictors[[1]][predictors[[1]] %in% predictors[[2]]][1] treatment <- .fix_factor_name(model, treatment) } mediator.model <- which(response == mediator) treatment.model <- which(response != mediator) if (fix_mediator) mediator <- .fix_factor_name(model, mediator) if (inherits(model, "brmsfit")) { response_name <- names(response) } else { response_name <- unname(response) } # brms removes underscores from variable names when naming estimates # so we need to fix variable names here response <- names(response) # Direct effect: coef(treatment) from model_y_treatment coef_treatment <- sprintf(pattern, response[treatment.model], treatment) effect_direct <- insight::get_parameters(model)[[coef_treatment]] # Mediator effect: coef(mediator) from model_y_treatment coef_mediator <- sprintf(pattern, response[treatment.model], mediator) effect_mediator <- insight::get_parameters(model)[[coef_mediator]] # Indirect effect: coef(treatment) from model_m_mediator * coef(mediator) from model_y_treatment coef_indirect <- sprintf(pattern, response[mediator.model], treatment) tmp.indirect <- insight::get_parameters(model)[c(coef_indirect, coef_mediator)] effect_indirect <- tmp.indirect[[coef_indirect]] * tmp.indirect[[coef_mediator]] # Total effect effect_total <- effect_indirect + effect_direct # proportion mediated: indirect effect / total effect proportion_mediated <- as.numeric(point_estimate(effect_indirect, centrality = centrality)) / as.numeric(point_estimate(effect_total, centrality = centrality)) hdi_eff <- ci(effect_indirect / effect_total, ci = ci, method = method) prop_mediated_se <- (hdi_eff$CI_high - hdi_eff$CI_low) / 2 prop_mediated_ci <- proportion_mediated + c(-1, 1) * prop_mediated_se res <- cbind( data.frame( Effect = c("Direct Effect (ADE)", "Indirect Effect (ACME)", "Mediator Effect", "Total Effect", "Proportion Mediated"), Estimate = c( as.numeric(point_estimate(effect_direct, centrality = centrality)), as.numeric(point_estimate(effect_indirect, centrality = centrality)), as.numeric(point_estimate(effect_mediator, centrality = centrality)), as.numeric(point_estimate(effect_total, centrality = centrality)), proportion_mediated ), stringsAsFactors = FALSE ), as.data.frame(rbind( ci(effect_direct, ci = ci, method = method)[, -1], ci(effect_indirect, ci = ci, method = method)[, -1], ci(effect_mediator, ci = ci, method = method)[, -1], ci(effect_total, ci = ci, method = method)[, -1], prop_mediated_ci )) ) colnames(res) <- c("Effect", "Estimate", "CI_low", "CI_high") samples <- data.frame( effect_direct, effect_indirect, effect_mediator, effect_total, proportion_mediated = effect_indirect / effect_total ) attr(res, "ci") <- ci attr(res, "ci_method") <- method attr(res, "treatment") <-
treatment attr(res, "mediator") <- mediator attr(res, "response") <- response_name[treatment.model] attr(res, "data") <- samples class(res) <- c("bayestestR_mediation", "see_bayestestR_mediation", class(res)) res } # methods --------------------- #' @export as.data.frame.bayestestR_mediation <- function(x, ...) { attributes(x)$data } # helper --------------------------------- .fix_factor_name <- function(model, variable) { # check for categorical. if the user has not specified a treatment variable # and this variable is categorical, the posterior samples contain the # samples from each category of the treatment variable - so we need to # fix the variable name mf <- insight::get_data(model) if (variable %in% colnames(mf)) { check_fac <- mf[[variable]] if (is.factor(check_fac)) { variable <- sprintf("%s%s", variable, levels(check_fac)[nlevels(check_fac)]) } else if (is.logical(check_fac)) { variable <- sprintf("%sTRUE", variable) } } variable } # S3 --------------------------------- #' @export print.bayestestR_mediation <- function(x, digits = 3, ...) { attr(x, "data") <- NULL insight::print_color("# Causal Mediation Analysis for Stan Model\n\n", "blue") cat(sprintf( " Treatment: %s\n Mediator : %s\n Response : %s\n\n", attr(x, "treatment", exact = TRUE), attr(x, "mediator", exact = TRUE), attr(x, "response", exact = TRUE) )) prop_mediated <- prop_mediated_ori <- x[nrow(x), ] x <- x[-nrow(x), ] x$CI <- insight::format_ci(x$CI_low, x$CI_high, ci = NULL, digits = digits, width = "auto", missing = "NA") x <- .remove_column(x, c("CI_low", "CI_high")) colnames(x)[ncol(x)] <- sprintf("%.5g%% %s", 100 * attributes(x)$ci, attributes(x)$ci_method) # remove class, to avoid conflicts with "as.data.frame.bayestestR_mediation()" class(x) <- "data.frame" cat(insight::export_table(x, digits = digits)) cat("\n") prop_mediated[] <- lapply(prop_mediated, function(i) insight::format_value(i, as_percent = TRUE)) insight::print_color( sprintf( "Proportion mediated: %s [%s, %s]\n", prop_mediated$Estimate, prop_mediated$CI_low, prop_mediated$CI_high ), "red" ) if (any(prop_mediated_ori$Estimate < 0)) { message("\nDirect and indirect effects have opposite directions. The proportion mediated is not meaningful.") } } #' @export plot.bayestestR_mediation <- function(x, ...) { insight::check_if_installed("see", "to plot results from mediation analysis") NextMethod() } bayestestR/R/cwi.R0000644000175000017500000000517314135670435013655 0ustar nileshnilesh#' Curvewise Intervals (CWI) #' #' Compute the **Curvewise interval (CWI)** (also called the "simultaneous interval" or "joint interval") of posterior distributions using \code{ggdist::curve_interval()}. #' Whereas the more typical "pointwise intervals" contain xx% of the posterior for a single parameter, #' joint/curvewise intervals contain xx% of the posterior distribution for **all** parameters. #' #' Applied to model predictions, pointwise intervals contain xx% of the predicted response values **conditional** on specific predictor values. #' In contrast, curvewise intervals contain xx% of the predicted response values across all predictor values. #' Put another way, curvewise intervals contain xx% of the full **prediction lines** from the model. #' #' For more details, see the [*ggdist* documentation on curvewise intervals](https://mjskay.github.io/ggdist/articles/lineribbon.html#curve-boxplots-aka-lineribbons-with-joint-intervals-or-curvewise-intervals-).
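#'
#' As a rough illustration of the distinction (toy curves only; the actual
#' computation in this package is delegated to `ggdist::curve_interval()`):
#' ```
#' curves <- replicate(100, cumsum(rnorm(50))) # 100 posterior draws of a curve
#' # pointwise: a quantile interval at each x-value separately
#' # (each row of `curves` holds the 100 draws at one x-value)
#' pointwise <- apply(curves, 1, quantile, probs = c(0.25, 0.75))
#' # curvewise: keep whole curves instead, e.g. the 50% of curves
#' # closest to the median curve
#' ```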
#' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi details #' @inherit hdi seealso #' @family ci #' #' @examples #' \donttest{ #' library(bayestestR) #' #' if (require("ggplot2") && require("rstanarm") && require("ggdist")) { #' #' # Generate data ============================================= #' k = 11 # number of curves (iterations) #' n = 201 # number of rows #' data <- data.frame(x = seq(-15,15,length.out = n)) #' #' # Simulate iterations as new columns #' for(i in 1:k) { #' data[paste0("iter_", i)] <- dnorm(data$x, seq(-5,5, length.out = k)[i], 3) #' } #' #' # Note: first, we need to transpose the data to have iters as rows #' iters <- datawizard::data_transpose(data[paste0("iter_", 1:k)]) #' #' # Compute Median #' data$Median <- point_estimate(iters)[["Median"]] #' #' # Compute Credible Intervals ================================ #' #' # Compute ETI (default type of CI) #' data[c("ETI_low", "ETI_high")] <- eti(iters, ci = 0.5)[c("CI_low", "CI_high")] #' #' # Compute CWI #' # ggdist::curve_interval(reshape_iterations(data), iter_value, .width = c(.5)) #' #' # Visualization ============================================= #' ggplot(data, aes(x = x, y = Median)) + #' geom_ribbon(aes(ymin = ETI_low, ymax = ETI_high), fill = "red", alpha = 0.3) + #' geom_line(size = 1) + #' geom_line(data = reshape_iterations(data), #' aes(y = iter_value, group = iter_group), #' alpha = 0.3) #' } #' } #' @export cwi <- function(x, ...) { UseMethod("cwi") } #' @rdname cwi #' @export cwi.data.frame <- function(x, ci = 0.95, ...) { insight::check_if_installed("ggdist") print("Coming soon!") }bayestestR/R/print_md.R0000644000175000017500000001251614076521542014704 0ustar nileshnilesh# Reexports models ------------------------ #' @importFrom insight print_md #' @export insight::print_md #' @export print_md.describe_posterior <- function(x, digits = 2, caption = "Summary of Posterior Distribution", ...) { .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.point_estimate <- function(x, digits = 2, caption = "Point Estimate", ...) { .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.map_estimate <- function(x, digits = 2, caption = "MAP Estimate", ...) { .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.p_direction <- function(x, digits = 2, caption = "Probability of Direction (pd)", ...) { .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) { .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.p_rope <- function(x, digits = 2, ...) { caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL .print_md_default(x = x, digits = digits, caption = caption, ci_string = "ROPE", ...) } #' @export print_md.p_significance <- function(x, digits = 2, ...) { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) .print_md_default(x = x, digits = digits, caption = caption, ...) } #' @export print_md.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { .print_md_default(x = x, digits = digits, caption = caption, ci_string = "HDI", ...) } #' @export print_md.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...)
{ .print_md_default(x = x, digits = digits, caption = caption, ci_string = "ETI", ...) } #' @export print_md.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_md_default(x = x, digits = digits, caption = caption, ci_string = "SI", ...) } # special handling for bayes factors ------------------ #' @export print_md.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) { .print_bf_md_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = c("llr"), ... ) } #' @export print_md.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_md_default( x = x, digits = digits, log = log, caption = caption, align = c("lrrr"), ... ) } #' @export print_md.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_md_default(x = x, digits = digits, log = log, caption = caption, ...) } #' @export print_md.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "markdown", ... ) insight::export_table(formatted_table, format = "markdown") } # util --------------- .print_md_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "markdown", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # print for data frame - I don't think we need a special handling for # numeric values to have a markdown-table output insight::export_table( formatted_table, caption = caption, format = "markdown" ) } .print_bf_md_default <- function(x, digits = 3, log = FALSE, show_names = NULL, caption = NULL, align = NULL, ...) { formatted_table <- format( x, digits = digits, log = log, show_names = show_names, caption = caption, format = "markdown", ... ) insight::export_table( formatted_table, align = align, caption = caption, format = "markdown" ) } bayestestR/R/bayesfactor_inclusion.R0000644000175000017500000001600114101116425017433 0ustar nileshnilesh#' Inclusion Bayes Factors for testing predictors across Bayesian models #' #' The `bf_*` function is an alias of the main function. #' \cr \cr #' For more info, see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @author Mattan S. Ben-Shachar #' @param models An object of class [bayesfactor_models()] or `BFBayesFactor`. #' @param match_models See details. #' @param prior_odds Optional vector of prior odds for the models. See #' `BayesFactor::priorOdds<-`. #' @param ... Arguments passed to or from other methods. #' #' @return a data frame containing the prior and posterior probabilities, and #' log(BF) for each effect. #' #' @details Inclusion Bayes factors answer the question: Are the observed data #' more probable under models with a particular effect, than they are under #' models without that particular effect? In other words, on average - are #' models with effect \eqn{X} more likely to have produced the observed data #' than models without effect \eqn{X}? 
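#'
#' As a back-of-the-envelope sketch of this ratio (made-up model probabilities,
#' purely for illustration):
#' ```
#' p_prior <- c(with = 0.5, without = 0.5) # prior prob. of models with/without X
#' p_post <- c(with = 0.8, without = 0.2) # posterior prob. of the same model sets
#' # inclusion BF = posterior inclusion odds / prior inclusion odds
#' (p_post[["with"]] / p_post[["without"]]) / (p_prior[["with"]] / p_prior[["without"]])
#' #> [1] 4
#' ```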
#' #' \subsection{Match Models}{ #' If `match_models=FALSE` (default), Inclusion BFs are computed by comparing #' all models with a term against all models without that term. If `TRUE`, #' comparison is restricted to models that (1) do not include any interactions #' with the term of interest; (2) for interaction terms, averaging is done only #' across models that contain the main effect terms of which the interaction #' term is composed. #' } #' #' @inheritSection bayesfactor_parameters Interpreting Bayes Factors #' #' @note Random effects in the `lmer` style are converted to interaction terms: #' i.e., `(X|G)` will become the terms `1:G` and `X:G`. #' #' @seealso [weighted_posteriors()] for Bayesian parameter averaging. #' #' @examples #' library(bayestestR) #' #' # Using bayesfactor_models: #' # ------------------------------ #' mo0 <- lm(Sepal.Length ~ 1, data = iris) #' mo1 <- lm(Sepal.Length ~ Species, data = iris) #' mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) #' #' BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0) #' bayesfactor_inclusion(BFmodels) #' \dontrun{ #' # BayesFactor #' # ------------------------------- #' library(BayesFactor) #' #' BF <- generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE) #' #' bayesfactor_inclusion(BF) #' #' # compare only matched models: #' bayesfactor_inclusion(BF, match_models = TRUE) #' } #' @references #' \itemize{ #' \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} #' \item Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80-101. #' \item Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP [Blog post](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp). #' } #' #' #' @export bayesfactor_inclusion <- function(models, match_models = FALSE, prior_odds = NULL, ...) { UseMethod("bayesfactor_inclusion") } #' @rdname bayesfactor_inclusion #' @export bf_inclusion <- bayesfactor_inclusion #' @export bayesfactor_inclusion.bayesfactor_models <- function(models, match_models = FALSE, prior_odds = NULL, ...) { if (isTRUE(attr(models, "unsupported_models"))) { stop("Cannot compute inclusion Bayes factors - passed models are not (yet) supported.", call.
= FALSE) } # Build Models Table # df.model <- .get_model_table(models, priorOdds = prior_odds) effnames <- colnames(df.model)[-(1:3)] # Build Interaction Matrix # if (isTRUE(match_models)) { effects.matrix <- as.matrix(df.model[, -c(1:3)]) df.interaction <- data.frame(effnames, stringsAsFactors = FALSE) for (eff in effnames) { df.interaction[, eff] <- sapply(effnames, function(x) .includes_interaction(x, eff)) } rownames(df.interaction) <- effnames df.interaction <- as.matrix(df.interaction[, -1]) } # Build Effect Table # df.effect <- data.frame( effnames, Pinc = rep(NA, length(effnames)), PincD = rep(NA, length(effnames)), log_BF = rep(NA, length(effnames)), stringsAsFactors = FALSE ) for (eff in effnames) { if (isTRUE(match_models)) { idx1 <- df.interaction[eff, ] idx2 <- df.interaction[, eff] has_not_high_order_interactions <- !apply(effects.matrix[, idx1, drop = FALSE], 1, any) ind_include <- has_not_high_order_interactions & effects.matrix[, eff] ind_exclude <- apply(effects.matrix[, idx2, drop = FALSE], 1, all) & has_not_high_order_interactions & !effects.matrix[, eff] df.model_temp <- df.model[ind_include | ind_exclude, , drop = FALSE] } else { df.model_temp <- df.model } # models with effect mwith <- which(df.model_temp[[eff]]) mwithprior <- sum(df.model_temp[mwith, "priorProbs"]) mwithpost <- sum(df.model_temp[mwith, "postProbs"]) # models without effect mwithoutprior <- sum(df.model_temp[-mwith, "priorProbs"]) mwithoutpost <- sum(df.model_temp[-mwith, "postProbs"]) # Save results df.effect$Pinc[effnames == eff] <- mwithprior df.effect$PincD[effnames == eff] <- mwithpost df.effect$log_BF[effnames == eff] <- (log(mwithpost) - log(mwithoutpost)) - (log(mwithprior) - log(mwithoutprior)) } df.effect <- df.effect[, -1, drop = FALSE] colnames(df.effect) <- c("p_prior", "p_posterior", "log_BF") rownames(df.effect) <- effnames class(df.effect) <- c("bayesfactor_inclusion", class(df.effect)) attr(df.effect, "matched") <- match_models attr(df.effect, "priorOdds") <- prior_odds return(df.effect) } #' @export bayesfactor_inclusion.BFBayesFactor <- function(models, match_models = FALSE, prior_odds = NULL, ...) { models <- bayesfactor_models.BFBayesFactor(models) bayesfactor_inclusion.bayesfactor_models(models, match_models = match_models, prior_odds = prior_odds ) } #' @keywords internal .includes_interaction <- function(eff, effnames) { eff_b <- strsplit(eff, "\\:") effnames_b <- strsplit(effnames, "\\:") is_int <- sapply(effnames_b, function(x) length(x) > 1) temp <- logical(length(effnames)) for (rr in seq_along(effnames)) { if (is_int[rr]) { temp[rr] <- all(eff_b[[1]] %in% effnames_b[[rr]]) & !all(effnames_b[[rr]] %in% eff_b[[1]]) } } temp } bayestestR/R/unupdate.R0000644000175000017500000000744114101116425014703 0ustar nileshnilesh#' Un-update Bayesian models to their prior-to-data state #' #' As posteriors are priors that have been updated after observing some data, #' the goal of this function is to un-update the posteriors to obtain models #' representing the priors. These models can then be used to examine the prior #' predictive distribution, or to compare priors with posteriors. #' \cr\cr #' This function is used internally to compute Bayes factors. #' #' @param model A fitted Bayesian model. #' @param verbose Toggle warnings. #' @param newdata List of `data.frames` to update the model with new data. Required even if the original data should be used. #' @param ... Not used. #' #' @return A model un-fitted to the data, representing the prior model.
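#'
#' @examples
#' \dontrun{
#' # illustrative sketch only -- assumes `model` is a fitted `stanreg` or
#' # `brmsfit` object from your session:
#' model_prior <- unupdate(model)
#' # `model_prior` can then be explored like any model, e.g. with
#' # describe_posterior(model_prior)
#' }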
#' #' @keywords internal #' @export unupdate <- function(model, verbose = TRUE, ...) { UseMethod("unupdate") } #' @export #' @rdname unupdate unupdate.stanreg <- function(model, verbose = TRUE, ...) { insight::check_if_installed("rstanarm") prior_PD <- stats::getCall(model)$prior_PD if (!is.null(prior_PD) && isTRUE(eval(parse(text = prior_PD)))) { return(model) } if (verbose) { message("Sampling priors, please wait...") } prior_dists <- sapply(rstanarm::prior_summary(model), `[[`, "dist") if (anyNA(prior_dists)) { stop( "Cannot sample from flat priors (such as when priors are ", "set to 'NULL' in a 'stanreg' model).", call. = FALSE ) } model_prior <- suppressWarnings( stats::update(model, prior_PD = TRUE, refresh = 0) ) model_prior } #' @export #' @rdname unupdate unupdate.brmsfit <- function(model, verbose = TRUE, ...) { insight::check_if_installed("brms") if (isTRUE(attr(model$prior, "sample_prior") == "only")) { return(model) } if (verbose) { message("Sampling priors, please wait...") } utils::capture.output( model_prior <- try(suppressMessages(suppressWarnings( stats::update(model, sample_prior = "only", refresh = 0) )), silent = TRUE) ) if (methods::is(model_prior, "try-error")) { if (grepl("proper priors", model_prior)) { stop( "Cannot sample from flat priors (such as the default ", "priors for fixed-effects in a 'brmsfit' model).", call. = FALSE ) } else { stop(model_prior) } } model_prior } #' @export #' @rdname unupdate unupdate.brmsfit_multiple <- function(model, verbose = TRUE, newdata = NULL, ...) { insight::check_if_installed("brms") if (isTRUE(attr(model$prior, "sample_prior") == "only")) { return(model) } if (verbose) { message("Sampling priors, please wait...") } utils::capture.output(model_prior <- try(suppressMessages(suppressWarnings( stats::update( model, sample_prior = "only", newdata = newdata, refresh = 0 ) )), silent = TRUE)) if (methods::is(model_prior, "try-error")) { if (grepl("proper priors", model_prior)) { stop( "Cannot sample from flat priors (such as the default ", "priors for fixed-effects in a 'brmsfit' model).", call. = FALSE ) } else { stop(model_prior) } } model_prior } #' @export #' @rdname unupdate unupdate.blavaan <- function(model, verbose = TRUE, ...) { insight::check_if_installed("blavaan") cl <- model@call if (isTRUE(eval(cl$prisamp))) { return(model) } if (verbose) { message("Sampling priors, please wait...") } cl$prisamp <- TRUE suppressMessages(suppressWarnings( utils::capture.output(model_prior <- eval(cl)) )) model_prior } bayestestR/R/eti.R0000644000175000017500000001504614111634401013637 0ustar nileshnilesh#' Equal-Tailed Interval (ETI) #' #' Compute the **Equal-Tailed Interval (ETI)** of posterior distributions using the quantiles method. The probability of being below this interval is equal to the probability of being above it. The ETI can be used in the context of uncertainty characterisation of posterior distributions as **Credible Interval (CI)**. 
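#'
#' Conceptually, for a vector of posterior draws the ETI is just a pair of
#' quantiles; a minimal sketch of what `ci = 0.95` maps to internally:
#' ```
#' x <- rnorm(1000)
#' quantile(x, probs = c((1 - 0.95) / 2, (1 + 0.95) / 2)) # 95% ETI bounds
#' ```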
#' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi details #' @inherit hdi seealso #' @family ci #' #' @examples #' library(bayestestR) #' #' posterior <- rnorm(1000) #' eti(posterior) #' eti(posterior, ci = c(.80, .89, .95)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' eti(df) #' eti(df, ci = c(.80, .89, .95)) #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' eti(model) #' eti(model, ci = c(.80, .89, .95)) #' #' library(emmeans) #' eti(emtrends(model, ~1, "wt")) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' eti(model) #' eti(model, ci = c(.80, .89, .95)) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' eti(bf) #' eti(bf, ci = c(.80, .89, .95)) #' } #' #' @export eti <- function(x, ...) { UseMethod("eti") } #' @rdname eti #' @export eti.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .eti(x = x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_eti", "see_eti", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @rdname eti #' @export eti.data.frame <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "eti") attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @rdname eti #' @export eti.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export eti.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export eti.bamlss <- function(x, ci = 0.95, component = c("all", "conditional", "location"), verbose = TRUE, ...) { component <- match.arg(component) d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export eti.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export eti.bayesQR <- eti.bcplm #' @export eti.blrm <- eti.bcplm #' @export eti.mcmc.list <- eti.bcplm #' @export eti.BGGM <- eti.bcplm #' @rdname eti #' @export eti.sim.merMod <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) dat <- .compute_interval_simMerMod(x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "eti") out <- dat$result attr(out, "data") <- dat$data out } #' @rdname eti #' @export eti.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_sim(x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "eti") out <- dat$result attr(out, "data") <- dat$data out } #' @rdname eti #' @export eti.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- eti(xdf, ci = ci, verbose = verbose, ...) 
attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @export eti.emm_list <- eti.emmGrid #' @rdname eti #' @export eti.stanreg <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .prepare_output( eti(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), ci = ci, verbose = verbose, ...), insight::clean_parameters(x), inherits(x, "stanmvreg") ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export eti.stanfit <- eti.stanreg #' @export eti.blavaan <- eti.stanreg #' @rdname eti #' @export eti.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .prepare_output( eti(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), ci = ci, verbose = verbose, ...), insight::clean_parameters(x) ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname eti #' @export eti.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- eti(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export eti.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { out <- hdi(as.data.frame(t(attributes(x)$iterations)), ...) } else { stop("No iterations present in the output.") } attr(out, "object_name") <- .safe_deparse(substitute(x)) out } # Helper ------------------------------------------------------------------ .eti <- function(x, ci, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } results <- as.vector(stats::quantile( x, probs = c((1 - ci) / 2, (1 + ci) / 2), names = FALSE )) data.frame( "CI" = ci, "CI_low" = results[1], "CI_high" = results[2] ) } bayestestR/R/map_estimate.R0000644000175000017500000001432414101116425015524 0ustar nileshnilesh#' Maximum A Posteriori probability estimate (MAP) #' #' Find the **Highest Maximum A Posteriori probability estimate (MAP)** of a posterior, i.e., the value associated with the highest probability density (the "peak" of the posterior distribution). In other words, it is an estimation of the *mode* for continuous parameters. Note that this function relies on [estimate_density], which by default uses a different smoothing bandwidth (`"SJ"`) compared to the legacy default implemented the base R [density] function (`"nrd0"`). #' #' @inheritParams hdi #' @inheritParams estimate_density #' #' @return A numeric value if `posterior` is a vector. If `posterior` #' is a model-object, returns a data frame with following columns: #' \itemize{ #' \item `Parameter` The model parameter(s), if `x` is a model-object. If `x` is a vector, this column is missing. #' \item `MAP_Estimate` The MAP estimate for the posterior or each model parameter. 
#' } #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' posterior <- rnorm(10000) #' map_estimate(posterior) #' #' plot(density(posterior)) #' abline(v = map_estimate(posterior), col = "red") #' #' library(rstanarm) #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' } #' #' @export map_estimate <- function(x, precision = 2^10, method = "kernel", ...) { UseMethod("map_estimate") } #' @rdname map_estimate #' @export map_estimate.numeric <- function(x, precision = 2^10, method = "kernel", ...) { d <- estimate_density(x, precision = precision, method = method, ...) hdp_x <- d$x[which.max(d$y)] hdp_y <- max(d$y) out <- hdp_x attr(out, "MAP_density") <- hdp_y attr(out, "data") <- x attr(out, "centrality") <- "map" class(out) <- unique(c("map_estimate", "see_point_estimate", class(out))) out } #' @rdname map_estimate #' @export map_estimate.bayesQR <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.BGGM <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.mcmc <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.bamlss <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.bcplm <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.blrm <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @export map_estimate.mcmc.list <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method) } #' @keywords internal .map_estimate_models <- function(x, precision, method, ...) { l <- sapply(x, map_estimate, precision = precision, method = method, simplify = FALSE, ...) out <- data.frame( Parameter = colnames(x), MAP_Estimate = unlist(l), stringsAsFactors = FALSE, row.names = NULL ) out <- .add_clean_parameters_attribute(out, x) attr(out, "MAP_density") <- sapply(l, attr, "MAP_density") attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) attr(out, "centrality") <- "map" class(out) <- unique(c("map_estimate", "see_point_estimate", class(out))) out } #' @rdname map_estimate #' @export map_estimate.stanreg <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) 
{ effects <- match.arg(effects) component <- match.arg(component) .map_estimate_models( x = insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method ) } #' @export map_estimate.stanfit <- map_estimate.stanreg #' @export map_estimate.blavaan <- map_estimate.stanreg #' @rdname map_estimate #' @export map_estimate.brmsfit <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) .map_estimate_models( x = insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method ) } #' @rdname map_estimate #' @export map_estimate.data.frame <- function(x, precision = 2^10, method = "kernel", ...) { .map_estimate_models(x, precision = precision, method = method) } #' @rdname map_estimate #' @export map_estimate.emmGrid <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) .map_estimate_models(x, precision = precision, method = method) } #' @export map_estimate.emm_list <- map_estimate.emmGrid #' @export map_estimate.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { map_estimate(as.data.frame(t(attributes(x)$iterations)), ...) } else { stop("No iterations present in the output.") } } # Methods ----------------------------------------------------------------- #' @rdname as.numeric.p_direction #' @method as.numeric map_estimate #' @export as.numeric.map_estimate <- function(x, ...) { if (inherits(x, "data.frame")) { me <- as.numeric(as.vector(x$MAP_Estimate)) names(me) <- x$Parameter me } else { as.vector(x) } } #' @method as.double map_estimate #' @export as.double.map_estimate <- as.numeric.map_estimate bayestestR/R/weighted_posteriors.R0000644000175000017500000002343714101116425017152 0ustar nileshnilesh#' Generate posterior distributions weighted across models #' #' Extract posterior samples of parameters, weighted across models. Weighting is #' done by comparing posterior model probabilities, via [bayesfactor_models()]. #' #' @param ... Fitted models (see details), all fit on the same data, or a single #' `BFBayesFactor` object. #' @param missing An optional numeric value to use if a model does not contain a #' parameter that appears in other models. Defaults to 0. #' @param prior_odds Optional vector of prior odds for the models compared to #' the first model (or the denominator, for `BFBayesFactor` objects). For #' `data.frame`s, this will be used as the basis of weighting. #' @param iterations For `BayesFactor` models, how many posterior samples to draw. #' @inheritParams bayesfactor_models #' @inheritParams bayesfactor_parameters #' #' @details #' Note that across models some parameters might play different roles. For #' example, the parameter `A` plays a different role in the model `Y ~ A + B` #' (where it is a main effect) than it does in the model `Y ~ A + B + A:B` #' (where it is a simple effect). In many cases centering of predictors (mean #' subtracting for continuous variables, and effects coding via `contr.sum` or #' orthonormal coding via {[contr.orthonorm()]} for factors) can reduce this #' issue. In any case you should be mindful of this issue. #' \cr\cr #' See [bayesfactor_models()] details for more info on passed models. 
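#' \cr\cr
#' The weighting itself amounts to resampling posterior draws from each model
#' in proportion to its posterior model probability; a simplified sketch with
#' made-up probabilities and draws:
#' ```
#' post_probs <- c(0.2, 0.8) # posterior model probabilities
#' draws <- list(
#'   data.frame(b = rnorm(4000, 0)), # draws from model 1
#'   data.frame(b = rnorm(4000, 1)) # draws from model 2
#' )
#' n_keep <- round(4000 * post_probs) # number of draws to keep per model
#' weighted <- do.call(rbind, Map(
#'   function(d, k) d[sample(nrow(d), k), , drop = FALSE],
#'   draws, n_keep
#' ))
#' ```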
#' \cr\cr #' Note that for `BayesFactor` models, posterior samples cannot be generated #' from intercept only models. #' \cr\cr #' This function is similar in function to `brms::posterior_average`. #' #' @note For `BayesFactor < 0.9.12-4.3`, in some instances there might be #' some problems of duplicate columns of random effects in the resulting data #' frame. #' #' @return A data frame with posterior distributions (weighted across models) . #' #' @seealso [bayesfactor_inclusion()] for Bayesian model averaging. #' #' @examples #' \donttest{ #' if (require("rstanarm") && require("see")) { #' stan_m0 <- stan_glm(extra ~ 1, #' data = sleep, #' family = gaussian(), #' refresh = 0, #' diagnostic_file = file.path(tempdir(), "df0.csv") #' ) #' #' stan_m1 <- stan_glm(extra ~ group, #' data = sleep, #' family = gaussian(), #' refresh = 0, #' diagnostic_file = file.path(tempdir(), "df1.csv") #' ) #' #' #' res <- weighted_posteriors(stan_m0, stan_m1) #' #' plot(eti(res)) #' } #' #' ## With BayesFactor #' if (require("BayesFactor")) { #' extra_sleep <- ttestBF(formula = extra ~ group, data = sleep) #' #' wp <- weighted_posteriors(extra_sleep) #' #' describe_posterior(extra_sleep, test = NULL) #' describe_posterior(wp$delta, test = NULL) # also considers the null #' } #' #' #' ## weighted prediction distributions via data.frames #' if (require("rstanarm")) { #' m0 <- stan_glm( #' mpg ~ 1, #' data = mtcars, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df0.csv"), #' refresh = 0 #' ) #' #' m1 <- stan_glm( #' mpg ~ carb, #' data = mtcars, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df1.csv"), #' refresh = 0 #' ) #' #' # Predictions: #' pred_m0 <- data.frame(posterior_predict(m0)) #' pred_m1 <- data.frame(posterior_predict(m1)) #' #' BFmods <- bayesfactor_models(m0, m1) #' #' wp <- weighted_posteriors(pred_m0, pred_m1, #' prior_odds = BFmods$BF[2] #' ) #' #' # look at first 5 prediction intervals #' hdi(pred_m0[1:5]) #' hdi(pred_m1[1:5]) #' hdi(wp[1:5]) # between, but closer to pred_m1 #' } #' } #' @references #' \itemize{ #' \item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via #' orthogonalized model mixing. Journal of the American Statistical #' Association, 91(435), 1197-1208. #' #' \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. #' (2019, March 25). A conceptual introduction to Bayesian Model Averaging. #' \doi{10.31234/osf.io/wgb64} #' #' \item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian #' inference for psychology, part IV: Parameter estimation and Bayes factors. #' Psychonomic bulletin & review, 25(1), 102-113. #' #' \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, #' E. J. (2019). A cautionary note on estimating effect size. #' } #' #' @export weighted_posteriors <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE) { UseMethod("weighted_posteriors") } #' @export #' @rdname weighted_posteriors weighted_posteriors.data.frame <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE) { Mods <- list(...) mnames <- sapply(match.call(expand.dots = FALSE)$`...`, .safe_deparse) # find min nrow iterations <- min(sapply(Mods, nrow)) # make weights from prior_odds if (!is.null(prior_odds)) { prior_odds <- c(1, prior_odds) } else { warning("'prior_odds = NULL'; Using uniform priors odds.\n", "For weighted data frame, 'prior_odds' should be specified as a numeric vector.", call. 
= FALSE ) prior_odds <- rep(1, length(Mods)) } Probs <- prior_odds / sum(prior_odds) weighted_samps <- round(iterations * Probs) # pass to .weighted_posteriors res <- .weighted_posteriors(Mods, weighted_samps, missing) # make weights table attr(res, "weights") <- data.frame(Model = mnames, weights = weighted_samps) return(res) } #' @export #' @rdname weighted_posteriors weighted_posteriors.stanreg <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL) { Mods <- list(...) effects <- match.arg(effects) component <- match.arg(component) # Get Bayes factors BFMods <- bayesfactor_models(..., denominator = 1, verbose = verbose) # Compute posterior model probabilities model_tab <- .get_model_table(BFMods, priorOdds = prior_odds) postProbs <- model_tab$postProbs # Compute weighted number of samples iterations <- min(sapply(Mods, .total_samps)) weighted_samps <- round(iterations * postProbs) # extract parameters params <- lapply(Mods, insight::get_parameters, effects = effects, component = component, parameters = parameters ) res <- .weighted_posteriors(params, weighted_samps, missing) attr(res, "weights") <- data.frame(Model = BFMods$Model, weights = weighted_samps) return(res) } #' @export #' @rdname weighted_posteriors weighted_posteriors.brmsfit <- weighted_posteriors.stanreg #' @export #' @rdname weighted_posteriors weighted_posteriors.blavaan <- weighted_posteriors.stanreg #' @export #' @rdname weighted_posteriors weighted_posteriors.BFBayesFactor <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE, iterations = 4000) { Mods <- c(...) # Get Bayes factors BFMods <- bayesfactor_models(Mods, verbose = verbose) # Compute posterior model probabilities model_tab <- .get_model_table(BFMods, priorOdds = prior_odds, add_effects_table = FALSE) postProbs <- model_tab$postProbs # Compute weighted number of samples weighted_samps <- round(iterations * postProbs) # extract parameters intercept_only <- which(BFMods$Model == "1") params <- vector(mode = "list", length = nrow(BFMods)) for (m in seq_along(params)) { if (length(intercept_only) && m == intercept_only) { # warning( # "Cannot sample from BFBayesFactor model with intercept only (model prob = ", # round(postProbs[m], 3) * 100, "%).\n", # "Omitting the intercept model.", # call. 
= FALSE # ) params[[m]] <- data.frame( mu = rep(NA, iterations), sig2 = rep(NA, iterations), g = rep(NA, iterations) ) } else if (m == 1) { # If the model is the "den" model params[[m]] <- BayesFactor::posterior(1 / Mods[1], iterations = iterations, progress = FALSE) } else { params[[m]] <- BayesFactor::posterior( Mods[m - 1], iterations = iterations, progress = FALSE ) } } params <- lapply(params, data.frame) res <- .weighted_posteriors(params, weighted_samps, missing) attr(res, "weights") <- data.frame(Model = BFMods$Model, weights = weighted_samps) return(res) } .weighted_posteriors <- function(params, weighted_samps, missing) { par_names <- unique(unlist(sapply(params, colnames), recursive = TRUE)) # remove empty (0 sample) models params <- params[weighted_samps != 0] weighted_samps <- weighted_samps[weighted_samps != 0] for (m in seq_along(weighted_samps)) { temp_params <- params[[m]] i <- sample(nrow(temp_params), size = weighted_samps[m]) temp_params <- temp_params[i, , drop = FALSE] # If any parameters not estimated in the model, they are assumed to be 0 (the default value of `missing`) missing_pars <- setdiff(par_names, colnames(temp_params)) temp_params[, missing_pars] <- missing params[[m]] <- temp_params } # combine all do.call("rbind", params) } #' @keywords internal .total_samps <- function(mod) { x <- insight::find_algorithm(mod) if (is.null(x$iterations)) x$iterations <- x$sample x$chains * (x$iterations - x$warmup) } bayestestR/R/p_direction.R0000644000175000017500000003520214101116425015351 0ustar nileshnilesh#' Probability of Direction (pd) #' #' Compute the **Probability of Direction** (***pd***, also known #' as the Maximum Probability of Effect - *MPE*). It varies between `50%` #' and `100%` (*i.e.*, `0.5` and `1`) and can be interpreted as #' the probability (expressed in percentage) that a parameter (described by its #' posterior distribution) is strictly positive or negative (whichever is the #' most probable). It is mathematically defined as the proportion of the #' posterior distribution that is of the median's sign. Although differently #' expressed, this index is fairly similar (*i.e.*, is strongly correlated) #' to the frequentist **p-value**. #' \cr\cr #' Note that in some (rare) cases, especially when used with model averaged #' posteriors (see [weighted_posteriors()] or #' `brms::posterior_average`), `pd` can be smaller than `0.5`, #' reflecting high credibility of `0`. #' #' @param x Vector representing a posterior distribution. Can also be a Bayesian model (`stanreg`, `brmsfit` or `BayesFactor`). #' @param method Can be `"direct"` or one of methods of [density estimation][estimate_density], such as `"kernel"`, `"logspline"` or `"KernSmooth"`. If `"direct"` (default), the computation is based on the raw ratio of samples superior and inferior to 0. Else, the result is based on the [Area under the Curve (AUC)][auc] of the estimated [density][estimate_density] function. #' @param null The value considered as a "null" effect. Traditionally 0, but could also be 1 in the case of ratios. #' @inheritParams hdi #' #' @details #' \subsection{What is the *pd*?}{ #' The Probability of Direction (pd) is an index of effect existence, ranging #' from `50%` to `100%`, representing the certainty with which an effect goes in #' a particular direction (*i.e.*, is positive or negative). 
Beyond its #' simplicity of interpretation, understanding and computation, this index also #' presents other interesting properties: #' \itemize{ #' \item It is independent of the model: It is solely based on the posterior #' distributions and does not require any additional information from the data #' or the model. #' \item It is robust to the scale of both the response variable and the predictors. #' \item It is strongly correlated with the frequentist p-value, and can thus #' be used to draw parallels and give some reference to readers unfamiliar #' with Bayesian statistics. #' } #' } #' \subsection{Relationship with the p-value}{ #' In most cases, it seems that the *pd* has a direct correspondence with the frequentist one-sided *p*-value through the formula \ifelse{html}{\out{p<sub>one sided</sub> = 1 - p<sub>d</sub>/100}}{\eqn{p_{one sided}=1-\frac{p_{d}}{100}}} and to the two-sided p-value (the most commonly reported one) through the formula \ifelse{html}{\out{p<sub>two sided</sub> = 2 * (1 - p<sub>d</sub>/100)}}{\eqn{p_{two sided}=2*(1-\frac{p_{d}}{100})}}. Thus, a two-sided p-value of respectively `.1`, `.05`, `.01` and `.001` would correspond approximately to a *pd* of `95%`, `97.5%`, `99.5%` and `99.95%`. See also [pd_to_p()]. #' } #' \subsection{Methods of computation}{ #' The simplest and most direct way to compute the *pd* is to 1) look at the #' median's sign, 2) select the portion of the posterior of the same sign and #' 3) compute the percentage that this portion represents. This "direct" method #' is the most straightforward, but its precision is directly tied to the #' number of posterior draws. The second approach relies on [density #' estimation][estimate_density]. It starts by estimating the density function #' (for which many methods are available), and then computing the [area under #' the curve][area_under_curve] (AUC) of the density curve on the other side of #' 0. #' } #' \subsection{Strengths and Limitations}{ #' **Strengths:** Straightforward computation and interpretation. Objective #' property of the posterior distribution. 1:1 correspondence with the #' frequentist p-value. #' \cr \cr #' **Limitations:** Limited information favoring the null hypothesis. #' } #' #' @return #' Values between 0.5 and 1 corresponding to the probability of direction (pd). #' \cr\cr #' Note that in some (rare) cases, especially when used with model averaged #' posteriors (see [weighted_posteriors()] or #' `brms::posterior_average`), `pd` can be smaller than `0.5`, #' reflecting high credibility of `0`. To detect such cases, #' `method = "direct"` must be used. #' #' @seealso [pd_to_p()] to convert between Probability of Direction (pd) and p-value. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references #' Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect #' Existence and Significance in the Bayesian Framework. Frontiers in Psychology #' 2019;10:2767.
\doi{10.3389/fpsyg.2019.02767} #' #' @examples #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_direction(posterior) #' p_direction(posterior, method = "kernel") #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_direction(df) #' p_direction(df, method = "kernel") #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_direction(model) #' p_direction(model, method = "kernel") #' } #' #' # emmeans #' # ----------------------------------------------- #' if (require("emmeans")) { #' p_direction(emtrends(model, ~1, "wt")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_direction(model) #' p_direction(model, method = "kernel") #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' p_direction(bf) #' p_direction(bf, method = "kernel") #' } #' } #' @export p_direction <- function(x, ...) { UseMethod("p_direction") } #' @rdname p_direction #' @export pd <- p_direction #' @rdname p_direction #' @export p_direction.numeric <- function(x, method = "direct", null = 0, ...) { if (method == "direct") { pdir <- max( c( length(x[x > null]) / length(x), # pd positive length(x[x < null]) / length(x) # pd negative ) ) } else { dens <- estimate_density(x, method = method, precision = 2^10, extend = TRUE, ...) if (length(x[x > null]) > length(x[x < null])) { dens <- dens[dens$x > null, ] } else { dens <- dens[dens$x < null, ] } pdir <- area_under_curve(dens$x, dens$y, method = "spline") if (pdir >= 1) pdir <- 1 # Enforce bounds } attr(pdir, "method") <- method attr(pdir, "data") <- x class(pdir) <- unique(c("p_direction", "see_p_direction", class(pdir))) pdir } #' @rdname p_direction #' @export p_direction.data.frame <- function(x, method = "direct", null = 0, ...) { obj_name <- .safe_deparse(substitute(x)) x <- .select_nums(x) if (ncol(x) == 1) { pd <- p_direction(x[, 1], method = method, null = null, ...) } else { pd <- sapply(x, p_direction, method = method, null = null, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "pd" = pd, row.names = NULL, stringsAsFactors = FALSE ) attr(out, "object_name") <- obj_name class(out) <- unique(c("p_direction", "see_p_direction", class(out))) out } #' @rdname p_direction #' @export p_direction.MCMCglmm <- function(x, method = "direct", null = 0, ...) { nF <- x$Fixed$nfl out <- p_direction(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, null = null, ... ) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_direction.mcmc <- function(x, method = "direct", null = 0, ...) { p_direction(as.data.frame(x), method = method, null = null, ...) } #' @export p_direction.BGGM <- function(x, method = "direct", null = 0, ...) { p_direction(as.data.frame(x), method = method, null = null, ...) } #' @export p_direction.bcplm <- function(x, method = "direct", null = 0, ...) { p_direction(insight::get_parameters(x), method = method, null = null, ...) 
} #' @export p_direction.mcmc.list <- p_direction.bcplm #' @export p_direction.blrm <- p_direction.bcplm #' @export p_direction.bayesQR <- p_direction.bcplm #' @export p_direction.bamlss <- function(x, method = "direct", null = 0, component = c("all", "conditional", "location"), ...) { component <- match.arg(component) out <- p_direction( insight::get_parameters(x, component = component), method = method, null = null, ... ) out <- .add_clean_parameters_attribute(out, x) out } #' @rdname p_direction #' @export p_direction.emmGrid <- function(x, method = "direct", null = 0, ...) { xdf <- insight::get_parameters(x) out <- p_direction(xdf, method = method, null = null, ...) attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) out } #' @export p_direction.emm_list <- p_direction.emmGrid #' @keywords internal .p_direction_models <- function(x, effects, component, parameters, method = "direct", null = 0, ...) { p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, null = null, ... ) } #' @export p_direction.sim.merMod <- function(x, effects = c("fixed", "random", "all"), parameters = NULL, method = "direct", null = 0, ...) { effects <- match.arg(effects) out <- .p_direction_models( x = x, effects = effects, component = "conditional", parameters = parameters, method = method, null = null, ... ) attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters) out } #' @export p_direction.sim <- function(x, parameters = NULL, method = "direct", null = 0, ...) { out <- .p_direction_models( x = x, effects = "fixed", component = "conditional", parameters = parameters, method = method, null = null, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @rdname p_direction #' @export p_direction.stanreg <- function(x, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, method = "direct", null = 0, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, null = null, ... ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_direction", "see_p_direction", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_direction.stanfit <- p_direction.stanreg #' @export p_direction.blavaan <- p_direction.stanreg #' @rdname p_direction #' @export p_direction.brmsfit <- function(x, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, method = "direct", null = 0, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, null = null, ... ), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_direction", "see_p_direction", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname p_direction #' @export p_direction.BFBayesFactor <- function(x, method = "direct", null = 0, ...) 
{ out <- p_direction(insight::get_parameters(x), method = method, null = null, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_direction.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { out <- p_direction(as.data.frame(t(attributes(x)$iterations)), ...) } else { stop("No iterations present in the output.") } attr(out, "object_name") <- .safe_deparse(substitute(x)) out } # Methods ----------------------------------------------------------------- #' Convert to Numeric #' #' @inheritParams base::as.numeric #' @method as.numeric p_direction #' @export as.numeric.p_direction <- function(x, ...) { if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$pd))) } else { return(as.vector(x)) } } #' @method as.double p_direction #' @export as.double.p_direction <- as.numeric.p_direction bayestestR/R/sensitivity_to_prior.R0000644000175000017500000000711214101116425017360 0ustar nileshnilesh#' Sensitivity to Prior #' #' Computes the sensitivity to the prior specification. This represents the proportion of change in some indices when the model is fitted with an antagonistic prior (a prior of the same shape located on the opposite side of the effect). #' #' @param model A Bayesian model (`stanreg` or `brmsfit`). #' @param index The indices from which to compute the sensitivity. Can be one or multiple names of the columns returned by `describe_posterior`. The case is important here (e.g., write 'Median' instead of 'median'). #' @param magnitude This represents the magnitude by which to shift the antagonistic prior (to test the sensitivity). For instance, a magnitude of 10 (default) means that the model will be updated with a prior located at 10 standard deviations from its original location. #' @param ... Arguments passed to or from other methods. #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) #' sensitivity_to_prior(model) #' #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' sensitivity_to_prior(model, index = c("Median", "MAP")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' # sensitivity_to_prior(model) #' } #' } #' @seealso DescTools #' @export sensitivity_to_prior <- function(model, index = "Median", magnitude = 10, ...) { UseMethod("sensitivity_to_prior") } #' @export sensitivity_to_prior.stanreg <- function(model, index = "Median", magnitude = 10, ...) { # Original params <- .extract_parameters(model, index = index, ...) # Priors priors <- .extract_priors_rstanarm(model) new_priors <- .prior_new_location(prior = priors$prior, sign = sign(params$Median), magnitude = magnitude) model_updated <- stats::update(model, data = insight::get_data(model), prior = new_priors, refresh = 0) # New model params_updated <- .extract_parameters(model_updated, index = index, ...) # Compute index sensitivity <- abs(as.matrix(params_updated[-1]) - as.matrix(params[-1])) / abs(as.matrix(params[-1])) # Clean up sensitivity <- as.data.frame(sensitivity) names(sensitivity) <- paste0("Sensitivity_", names(params_updated)[-1]) sensitivity <- cbind(params_updated[1], sensitivity) row.names(sensitivity) <- NULL sensitivity } #' @keywords internal .extract_parameters <- function(model, index = "Median", ...) { # Handle BF test <- c("pd", "rope", "p_map") if (any(c("bf", "bayesfactor", "bayes_factor") %in% c(index))) { test <- c(test, "bf") } params <- suppressMessages(describe_posterior(model, centrality = "all", dispersion = TRUE, test = test, ...)) params <- params[params$Parameter != "(Intercept)", ] params[unique(c("Parameter", "Median", index))] } #' Set a new location for a prior #' @keywords internal .prior_new_location <- function(prior, sign, magnitude = 10) { prior$location <- -1 * sign * magnitude * prior$scale prior } #' Extract and return the priors formatted for rstanarm #' @keywords internal .extract_priors_rstanarm <- function(model, ...) { priors <- rstanarm::prior_summary(model) # Deal with adjusted scale if (!is.null(priors$prior$adjusted_scale)) { priors$prior$scale <- priors$prior$adjusted_scale priors$prior$adjusted_scale <- NULL } priors$prior$autoscale <- FALSE priors } bayestestR/R/print.equivalence_test.R0000644000175000017500000001126314076521542017561 0ustar nileshnilesh#' @export print.equivalence_test <- function(x, digits = 2, ...) { orig_x <- x insight::print_color("# Test for Practical Equivalence\n\n", "blue") cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, x$ROPE_low[1], digits, x$ROPE_high[1])) # fix "sd" pattern model <- .retrieve_model(x) if (!is.null(model)) { cp <- insight::clean_parameters(model) if (!is.null(cp$Group) && any(grepl("^SD/Cor", cp$Group))) { cp <- cp[grepl("^SD/Cor", cp$Group), ] matches <- match(cp$Parameter, x$Parameter) if (length(matches)) x$Parameter[matches] <- paste0("SD/Cor: ", cp$Cleaned_Parameter[stats::na.omit(match(x$Parameter, cp$Parameter))]) } } # find the longest HDI-value, so we can align the brackets in the output x$HDI_low <- sprintf("%.*f", digits, x$HDI_low) x$HDI_high <- sprintf("%.*f", digits, x$HDI_high) maxlen_low <- max(nchar(x$HDI_low)) maxlen_high <- max(nchar(x$HDI_high)) x$ROPE_Percentage <- sprintf("%.*f %%", digits, x$ROPE_Percentage * 100) x$HDI <- sprintf("[%*s %*s]", maxlen_low, x$HDI_low, maxlen_high, x$HDI_high) # clean parameter names # if ("Parameter" %in% colnames(x) && "Cleaned_Parameter" %in% colnames(x)) { # x$Parameter <- x$Cleaned_Parameter # } ci <- unique(x$CI) keep.columns <- c("CI", "Parameter", "ROPE_Equivalence", "ROPE_Percentage", "HDI", "Effects", "Component") x <- x[, intersect(keep.columns, colnames(x))] colnames(x)[which(colnames(x) == "ROPE_Equivalence")] <- "H0" colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" .print_equivalence_component(x, ci, digits) # split_column <- "" # split_column <- c(split_column, ifelse("Component" %in% names(x) && length(unique(x$Component)) > 1, "Component", "")) # split_column <- c(split_column, ifelse("Effects" %in% names(x) && length(unique(x$Effects)) > 1, "Effects", "")) # split_column <- split_column[nchar(split_column) > 0] # # if (length(split_column)) { # # # set up split-factor # if (length(split_column) > 1) { # split_by <- lapply(split_column, function(i) x[[i]]) # } else { # split_by <- list(x[[split_column]]) # } # names(split_by) <- split_column # # # # make sure we have correct sorting here... 
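  # NOTE: the commented-out block below is an older printing path that split the
  # output by component/effect; it appears to be superseded by the
  # .print_equivalence_component() call above and is kept for reference only.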
# tables <- split(x, f = split_by) # # for (type in names(tables)) { # # # Don't print Component column # tables[[type]][["Effects"]] <- NULL # tables[[type]][["Component"]] <- NULL # # component_name <- switch( # type, # "fixed" = , # "conditional" = "Fixed Effects", # "random" = "Random Effects", # "conditional.fixed" = "Fixed Effects (Count Model)", # "conditional.random" = "Random Effects (Count Model)", # "zero_inflated" = "Zero-Inflated", # "zero_inflated.fixed" = "Fixed Effects (Zero-Inflated Model)", # "zero_inflated.random" = "Random Effects (Zero-Inflated Model)", # "smooth_sd" = "Smooth Terms (SD)", # "smooth_terms" = "Smooth Terms", # type # ) # # insight::print_color(sprintf(" %s\n\n", component_name), "red") # .print_equivalence_component(tables[[type]], ci, digits) # } # } else { # type <- paste0(unique(x$Component), ".", unique(x$Effects)) # component_name <- switch( # type, # "conditional.fixed" = "Fixed Effects", # "conditional.random" = "Random Effects", # "zero_inflated.fixed" = "Fixed Effects (Zero-Inflated Model)", # "zero_inflated.random" = "Random Effects (Zero-Inflated Model)", # type # ) # # x$Effects <- NULL # x$Component <- NULL # # insight::print_color(sprintf(" %s\n\n", component_name), "red") # .print_equivalence_component(x, ci, digits) # } invisible(orig_x) } .print_equivalence_component <- function(x, ci, digits) { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] colnames(xsub)[colnames(xsub) == "HDI"] <- sprintf("%i%% HDI", 100 * i) print_data_frame(xsub, digits = digits) cat("\n") } } .retrieve_model <- function(x) { # retrieve model obj_name <- attr(x, "object_name", exact = TRUE) model <- NULL if (!is.null(obj_name)) { # first try, parent frame model <- tryCatch( { get(obj_name, envir = parent.frame()) }, error = function(e) { NULL } ) if (is.null(model)) { # second try, global env model <- tryCatch( { get(obj_name, envir = globalenv()) }, error = function(e) { NULL } ) } } model } bayestestR/R/utils_check_collinearity.R0000644000175000017500000000473414101116425020133 0ustar nileshnilesh#' @keywords internal .check_multicollinearity <- function(model, method = "equivalence_test", threshold = 0.7, ...) { valid_parameters <- insight::find_parameters(model, parameters = "^(?!(r_|sd_|prior_|cor_|lp__|b\\[))", flatten = TRUE) if (inherits(model, "stanfit")) { dat <- insight::get_parameters(model)[, valid_parameters, drop = FALSE] } else { dat <- as.data.frame(model, optional = FALSE)[, valid_parameters, drop = FALSE] } # need at least three columns, one is removed anyway... 
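  # Descriptive note: the first column of the posterior draws (typically the
  # intercept) is dropped below, and pairwise correlations are then computed
  # among the remaining parameters to flag possible multicollinearity.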
if (ncol(dat) > 2) { dat <- dat[, -1, drop = FALSE] if (ncol(dat) > 1) { parameter_correlation <- stats::cor(dat) parameter <- expand.grid(colnames(dat), colnames(dat), stringsAsFactors = FALSE) results <- cbind( parameter, corr = abs(as.vector(expand.grid(parameter_correlation)[[1]])), pvalue = apply(parameter, 1, function(r) stats::cor.test(dat[[r[1]]], dat[[r[2]]])$p.value) ) # Filter results <- results[results$pvalue < 0.05 & results$Var1 != results$Var2, ] if (nrow(results) > 0) { # Remove duplicates results$where <- paste0(results$Var1, " and ", results$Var2) results$where2 <- paste0(results$Var2, " and ", results$Var1) to_remove <- c() for (i in seq_len(nrow(results))) { if (results$where2[i] %in% results$where[1:i]) { to_remove <- c(to_remove, i) } } # guard against an empty `to_remove`: `results[-NULL, ]` would drop all rows if (length(to_remove) > 0) { results <- results[-to_remove, ] } # Filter by first threshold threshold <- ifelse(threshold >= .9, .9, threshold) results <- results[results$corr > threshold & results$corr <= .9, ] if (nrow(results) > 0) { where <- paste0("between ", paste0(paste0(results$where, " (r = ", round(results$corr, 2), ")"), collapse = ", "), "") message("Possible multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'.") } # Filter by second threshold results <- results[results$corr > .9, ] if (nrow(results) > 0) { where <- paste0("between ", paste0(paste0(results$where, " (r = ", round(results$corr, 2), ")"), collapse = ", "), "") warning("Probable multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'.", call. = FALSE) } } } } } bayestestR/R/print_html.R0000644000175000017500000001263614076521542015253 0ustar nileshnilesh# Reexports ------------------------ #' @importFrom insight print_html #' @export insight::print_html #' @export print_html.describe_posterior <- function(x, digits = 2, caption = "Summary of Posterior Distribution", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.point_estimate <- function(x, digits = 2, caption = "Point Estimate", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.map_estimate <- function(x, digits = 2, caption = "MAP Estimate", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_direction <- function(x, digits = 2, caption = "Probability of Direction (pd)", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_rope <- function(x, digits = 2, ...) { caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL .print_html_default(x = x, digits = digits, caption = caption, ci_string = "ROPE", ...) } #' @export print_html.p_significance <- function(x, digits = 2, ...) { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "HDI", ...) } #' @export print_html.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "ETI", ...) } #' @export print_html.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "SI", ...) } # special handling for bayes factors ------------------ #' @export print_html.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) { .print_bf_html_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = c("llr"), ... ) } #' @export print_html.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_html_default( x = x, digits = digits, log = log, caption = caption, align = c("lrrr"), ... ) } #' @export print_html.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_html_default(x = x, digits = digits, log = log, caption = caption, ...) } #' @export print_html.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "html", ... ) insight::export_table(formatted_table, format = "html") } # util --------------- .print_html_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "html", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # print for data frame - I don't think we need special handling for # numeric values to have a markdown-table output insight::export_table( formatted_table, caption = caption, format = "html" ) } .print_bf_html_default <- function(x, digits = 3, log = FALSE, show_names = NULL, caption = NULL, align = NULL, ...) { formatted_table <- format( x, digits = digits, log = log, show_names = show_names, caption = caption, format = "html", ... ) insight::export_table( formatted_table, align = align, caption = caption, format = "html" ) } bayestestR/R/overlap.R0000644000175000017500000000434614101116425014527 0ustar nileshnilesh#' Overlap Coefficient #' #' A method to calculate the overlap coefficient between two empirical distributions (which can be used as a measure of similarity between two samples). #' #' @param x Vector of x values. #' @param y Vector of y values. #' @param method_auc Area Under the Curve (AUC) estimation method. See [area_under_curve()]. #' @param method_density Density estimation method. See [estimate_density()]. #' @inheritParams estimate_density #' #' @examples #' library(bayestestR) #' #' x <- distribution_normal(1000, 2, 0.5) #' y <- distribution_normal(1000, 0, 1) #' #' overlap(x, y) #' plot(overlap(x, y)) #' @export overlap <- function(x, y, method_density = "kernel", method_auc = "trapezoid", precision = 2^10, extend = TRUE, extend_scale = 0.1, ...) { # Generate densities dx <- estimate_density(x, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ...) dy <- estimate_density(y, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ...) 
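  # Conceptual note: the overlap coefficient computed below is the area under
  # the pointwise minimum of the two estimated densities; it ranges from 0
  # (completely disjoint distributions) to 1 (identical distributions).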
# Create density estimation functions fx <- stats::approxfun(dx$x, dx$y, method = "linear", rule = 2) fy <- stats::approxfun(dy$x, dy$y, method = "linear", rule = 2) x_axis <- seq(min(c(dx$x, dy$x)), max(c(dx$x, dy$x)), length.out = precision) data <- data.frame(x = x_axis, y1 = fx(x_axis), y2 = fy(x_axis)) # calculate intersection densities data$intersection <- pmin(data$y1, data$y2) data$exclusion <- pmax(data$y1, data$y2) # integrate areas under curves area_intersection <- area_under_curve(data$x, data$intersection, method = method_auc) # area_exclusion <- area_under_curve(data$x, data$exclusion, method = method_auc) # compute overlap coefficient overlap <- area_intersection attr(overlap, "data") <- data class(overlap) <- c("overlap", class(overlap)) overlap } #' @export print.overlap <- function(x, ...) { insight::print_color("# Overlap\n\n", "blue") cat(sprintf("%.2f", as.numeric(x))) } #' @export plot.overlap <- function(x, ...) { # Can be improved through see data <- attributes(x)$data plot(data$x, data$exclusion, type = "l") graphics::polygon(data$x, data$intersection, col = "red") } bayestestR/R/utils_bayesfactor.R0000644000175000017500000002743714076521542016616 0ustar nileshnilesh # clean priors and posteriors --------------------------------------------- #' @keywords internal .clean_priors_and_posteriors <- function(posterior, prior, verbose = TRUE, ...) { UseMethod(".clean_priors_and_posteriors") } #' @keywords internal .clean_priors_and_posteriors.stanreg <- function(posterior, prior, verbose = TRUE, effects, component, ...) { # Get Priors if (is.null(prior)) { prior <- posterior } prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (methods::is(prior, "try-error")) { if (grepl("flat priors", prior)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the relative likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } stop(prior, call. = FALSE) } prior <- insight::get_parameters(prior, effects = effects, component = component, ...) posterior <- insight::get_parameters(posterior, effects = effects, component = component, ...) list( posterior = posterior, prior = prior ) } #' @keywords internal .clean_priors_and_posteriors.brmsfit <- .clean_priors_and_posteriors.stanreg #' @keywords internal .clean_priors_and_posteriors.blavaan <- function(posterior, prior, verbose = TRUE, ...) { # Get Priors if (is.null(prior)) { prior <- posterior } prior <- unupdate(prior, verbose = verbose) prior <- insight::get_parameters(prior) posterior <- insight::get_parameters(posterior) list( posterior = posterior, prior = prior ) } #' @keywords internal .clean_priors_and_posteriors.emmGrid <- function(posterior, prior, verbose = TRUE) { insight::check_if_installed("emmeans") if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please provide the original model to get meaningful results." ) } else if (!inherits(prior, "emmGrid")) { # then is it a model prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (methods::is(prior, "try-error")) { if (grepl("flat priors", prior)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the relative likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } stop(prior, call. = FALSE) } prior <- emmeans::ref_grid(prior) prior <- prior@post.beta if (!isTRUE(all.equal(colnames(prior), colnames(posterior@post.beta)))) { stop( "Unable to reconstruct prior estimates.\n", "Perhaps the emmGrid object has been transformed? See function details.\n", call. = FALSE ) } prior <- stats::update(posterior, post.beta = prior) } prior <- insight::get_parameters(prior) posterior <- insight::get_parameters(posterior) list( posterior = posterior, prior = prior ) } .clean_priors_and_posteriors.emm_list <- function(posterior, prior, verbose = TRUE) { if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please provide the original model to get meaningful results." ) } else if (!inherits(prior, "emm_list")) { prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (methods::is(prior, "try-error")) { if (grepl("flat priors", prior)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the relative likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } stop(prior, call. = FALSE) } } # prior is now a model, or emm_list # is it a model? pass_em <- inherits(prior, "emm_list") res <- lapply(seq_along(posterior), function(i) { .clean_priors_and_posteriors.emmGrid( posterior[[i]], prior = if (pass_em) prior[[i]] else prior, verbose = verbose ) }) posterior <- do.call("cbind", lapply(res, "[[", "posterior")) prior <- do.call("cbind", lapply(res, "[[", "prior")) list( posterior = posterior, prior = prior ) } # BMA --------------------------------------------------------------------- #' @keywords internal .get_model_table <- function(BFGrid, priorOdds = NULL, add_effects_table = TRUE, ...) { denominator <- attr(BFGrid, "denominator") BFGrid <- rbind(BFGrid[denominator, ], BFGrid[-denominator, ]) attr(BFGrid, "denominator") <- 1 # This looks like it does nothing, but this is needed to prevent Inf in large BFs. 
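  # (Elaboration: exponentiating a very large log-BF overflows to Inf, whereas
  # re-referencing against the best model keeps all ratios at or below 1,
  # which remains finite and numerically safe.)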
# Small BFs are better than large BFs BFGrid <- stats::update(BFGrid, reference = "top") # Prior and post odds Modelnames <- BFGrid$Model if (!is.null(priorOdds)) { priorOdds <- c(1, priorOdds) } else { priorOdds <- rep(1, length(Modelnames)) } prior_logodds <- log(priorOdds) posterior_logodds <- prior_logodds + BFGrid$log_BF # norm prior_logodds <- prior_logodds - log(sum(exp(prior_logodds))) posterior_logodds <- posterior_logodds - log(sum(exp(posterior_logodds))) df.model <- data.frame( Modelnames, priorProbs = exp(prior_logodds), postProbs = exp(posterior_logodds), stringsAsFactors = FALSE ) # add effects table if (add_effects_table) { for (m in seq_len(nrow(df.model))) { tmp_terms <- .make_terms(df.model$Modelnames[m]) if (length(tmp_terms) > 0) { missing_terms <- !tmp_terms %in% colnames(df.model) # For R < 3.6.0 if (any(missing_terms)) df.model[, tmp_terms[missing_terms]] <- NA # For R < 3.6.0 df.model[m, tmp_terms] <- TRUE } } } df.model[is.na(df.model)] <- FALSE df.model } #' @keywords internal .make_terms <- function(formula) { sort_interactions <- function(x) { if (grepl("\\:", x)) { effs <- unlist(strsplit(x, "\\:")) x <- paste0(sort(effs), collapse = ":") } x } formula.f <- stats::as.formula(paste0("~", formula)) all.terms <- attr(stats::terms(formula.f), "term.labels") # Fixed fix_trms <- all.terms[!grepl("\\|", all.terms)] # no random if (length(fix_trms) > 0) { fix_trms <- sapply(fix_trms, sort_interactions) } # Random random_parts <- paste0(all.terms[grepl("\\|", all.terms)]) # only random if (length(random_parts) == 0) { return(fix_trms) } random_units <- sub("^.+\\|\\s+", "", random_parts) tmp_random <- lapply( sub("\\|.+$", "", random_parts), function(x) stats::as.formula(paste0("~", x)) ) rand_trms <- vector("list", length(random_parts)) for (i in seq_along(random_parts)) { tmp_trms <- attr(stats::terms.formula(tmp_random[[i]]), "term.labels") tmp_trms <- sapply(tmp_trms, sort_interactions) if (!any(unlist(strsplit(as.character(tmp_random[[i]])[[2]], " \\+ ")) == "0")) { tmp_trms <- c("1", tmp_trms) } rand_trms[[i]] <- paste0(tmp_trms, ":", random_units[[i]]) } c(fix_trms, unlist(rand_trms)) } # make_BF_plot_data ------------------------------------------------------- #' @keywords internal .make_BF_plot_data <- function(posterior, prior, direction, null, extend_scale = 0.05, precision = 2^8, ...) { insight::check_if_installed("logspline") estimate_samples_density <- function(samples) { nm <- .safe_deparse(substitute(samples)) samples <- utils::stack(samples) samples <- split(samples, samples$ind) samples <- lapply(samples, function(data) { # 1. estimate density x <- data$values x_range <- range(x) x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1) x_range <- c( max(c(x_range[1], x_rangex[1])), min(c(x_range[2], x_rangex[2])) ) x_range <- range(c(x_range, null)[!is.infinite(c(x_range, null))]) extension_scale <- diff(x_range) * extend_scale x_range <- x_range + c(-1, 1) * extension_scale x_axis <- seq(x_range[1], x_range[2], length.out = precision) # x_axis <- sort(unique(c(x_axis, null))) f_x <- .logspline(x, ...) y <- logspline::dlogspline(x_axis, f_x) d_points <- data.frame(x = x_axis, y = y) # 2. estimate points d_null <- stats::approx(d_points$x, d_points$y, xout = null) d_null$y[is.na(d_null$y)] <- 0 # 3. direction? 
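  # For one-sided hypotheses, the density is truncated at the null value and
  # renormalized by the corresponding tail mass (via plogspline), so that the
  # retained portion still integrates to 1.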
if (direction > 0) { d_points <- d_points[d_points$x >= min(null), , drop = FALSE] if (is.infinite(min(null))) { norm_factor <- 1 } else { norm_factor <- 1 - logspline::plogspline(min(null), f_x) } d_points$y <- d_points$y / norm_factor d_null$y <- d_null$y / norm_factor } else if (direction < 0) { d_points <- d_points[d_points$x <= max(null), , drop = FALSE] if (is.infinite(max(null))) { norm_factor <- 1 } else { norm_factor <- logspline::plogspline(max(null), f_x) } d_points$y <- d_points$y / norm_factor d_null$y <- d_null$y / norm_factor } d_points$ind <- d_null$ind <- data$ind[1] list(d_points, d_null) }) # 4a. organize point0 <- lapply(samples, function(.) as.data.frame(.[[2]])) point0 <- do.call("rbind", point0) samplesX <- lapply(samples, function(.) .[[1]]) samplesX <- do.call("rbind", samplesX) samplesX$Distribution <- point0$Distribution <- nm rownames(samplesX) <- rownames(point0) <- c() list(samplesX, point0) } # 4b. organize posterior <- estimate_samples_density(posterior) prior <- estimate_samples_density(prior) list( plot_data = rbind(posterior[[1]], prior[[1]]), d_points = rbind(posterior[[2]], prior[[2]]) ) } # As numeric vector ------------------------------------------------------- #' @export as.numeric.bayesfactor_inclusion <- function(x, ...) { if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$BF))) } else { return(as.vector(x)) } } #' @export as.numeric.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_inclusion <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion # logspline --------------------------------------------------------------- #' @keywords internal .logspline <- function(x, ...) { insight::check_if_installed("logspline") # arg_names <- names(formals(logspline::logspline, envir = parent.frame())) arg_names <- names(formals(logspline::logspline)) # support R<3.6.0 in_args <- list(...) in_args <- in_args[names(in_args) %in% arg_names] in_args <- c(list(x = x), in_args) suppressWarnings(do.call(logspline::logspline, in_args)) } bayestestR/R/as.list.R0000644000175000017500000000140414023526535014436 0ustar nileshnilesh # as.list ----------------------------------------------------------------- #' @export as.list.bayestestR_hdi <- function(x, ...) { if (nrow(x) == 1) { out <- list(CI = x$CI, CI_low = x$CI_low, CI_high = x$CI_high) out$Parameter <- x$Parameter } else { out <- list() for (param in x$Parameter) { out[[param]] <- list() out[[param]][["CI"]] <- x[x$Parameter == param, "CI"] out[[param]][["CI_low"]] <- x[x$Parameter == param, "CI_low"] out[[param]][["CI_high"]] <- x[x$Parameter == param, "CI_high"] } } out } #' @export as.list.bayestestR_eti <- as.list.bayestestR_hdi #' @export as.list.bayestestR_si <- as.list.bayestestR_hdi #' @export as.list.bayestestR_ci <- as.list.bayestestR_hdi bayestestR/R/estimate_density.R0000644000175000017500000003636014101116425016432 0ustar nileshnilesh#' Density Estimation #' #' This function is a wrapper over different methods of density estimation. By default, it uses the base R `density` function, but with a different default smoothing bandwidth (`"SJ"`) than the legacy default implemented in `density` (`"nrd0"`). However, Deng \& Wickham (2011) suggest that `method = "KernSmooth"` is the fastest and the most accurate. #' #' @inheritParams hdi #' @inheritParams stats::density #' @param bw See the eponymous argument in `density`. Here, the default has been changed to `"SJ"`, which is recommended. #' @param ci The confidence interval threshold. Only used when `method = "kernel"`. #' @param method Density estimation method. Can be `"kernel"` (default), `"logspline"`, `"KernSmooth"` or `"mixture"`. #' @param precision Number of points of density data. See the `n` parameter in `density`. #' @param extend Extend the range of the x axis by a factor of `extend_scale`. #' @param extend_scale Ratio of range by which to extend the x axis. A value of `0.1` means that the x axis will be extended by `1/10` of the range of the data. #' @param group_by Optional character vector. If not `NULL` and `x` is a data frame, density estimation is performed for each group (subset) indicated by `group_by`. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(bayestestR) #' #' set.seed(1) #' x <- rnorm(250, mean = 1) #' #' # Basic usage #' density_kernel <- estimate_density(x) # default method is "kernel" #' #' hist(x, prob = TRUE) #' lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) #' lines(density_kernel$x, density_kernel$CI_low, col = "gray", lty = 2) #' lines(density_kernel$x, density_kernel$CI_high, col = "gray", lty = 2) #' legend("topright", #' legend = c("Estimate", "95% CI"), #' col = c("black", "gray"), lwd = 2, lty = c(1, 2) #' ) #' #' # Other Methods #' density_logspline <- estimate_density(x, method = "logspline") #' density_KernSmooth <- estimate_density(x, method = "KernSmooth") #' density_mixture <- estimate_density(x, method = "mixture") #' #' hist(x, prob = TRUE) #' lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) #' lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) #' lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) #' lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) #' #' # Extension #' density_extended <- estimate_density(x, extend = TRUE) #' density_default <- estimate_density(x, extend = FALSE) #' #' hist(x, prob = TRUE) #' lines(density_extended$x, density_extended$y, col = "red", lwd = 3) #' lines(density_default$x, density_default$y, col = "black", lwd = 3) #' #' # Multiple columns #' df <- data.frame(replicate(4, rnorm(100))) #' head(estimate_density(df)) #' #' # Grouped data #' estimate_density(iris, group_by = "Species") #' estimate_density(iris$Petal.Width, group_by = iris$Species) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' head(estimate_density(model)) #' #' library(emmeans) #' head(estimate_density(emtrends(model, ~1, "wt"))) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' estimate_density(model) #' } #' #' @references Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. #' #' @export estimate_density <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) { UseMethod("estimate_density") } #' @keywords internal .estimate_density <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, ...) { method <- match.arg(tolower(method), c("kernel", "logspline", "kernsmooth", "smooth", "mixture", "mclust")) # Remove NA x <- x[!is.na(x)] # Range x_range <- range(x) if (extend) { extension_scale <- diff(x_range) * extend_scale x_range[1] <- x_range[1] - extension_scale x_range[2] <- x_range[2] + extension_scale } # Replace inf values if needed x_range[is.infinite(x_range)] <- 5.565423e+156 # Kernel if (method == "kernel") { kde <- .estimate_density_kernel(x, x_range, precision, bw, ci, ...) # Logspline } else if (method == "logspline") { kde <- .estimate_density_logspline(x, x_range, precision, ...) # KernSmooth } else if (method %in% c("kernsmooth", "smooth")) { kde <- .estimate_density_KernSmooth(x, x_range, precision, ...) # Mixture } else if (method %in% c("mixture", "mclust")) { kde <- .estimate_density_mixture(x, x_range, precision, ...) } else { stop("method should be one of 'kernel', 'logspline', 'KernSmooth' or 'mixture'.") } kde } # Methods ----------------------------------------------------------------- #' @export estimate_density.numeric <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, group_by = NULL, ...) { if (!is.null(group_by)) { if (length(group_by) == 1) { stop("`group_by` must be either the name of a group column if a data.frame is entered as input, or in this case (where a single vector was passed) a vector of the same length.") } out <- estimate_density(data.frame(V1 = x, Group = group_by), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, group_by = "Group", ...) out$Parameter <- NULL return(out) } out <- .estimate_density(x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, ...) class(out) <- .set_density_class(out) out } #' @rdname estimate_density #' @export estimate_density.data.frame <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, group_by = NULL, ...) { if (is.null(group_by)) { out <- .estimate_density_df(x = x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, ...) } else { xlist <- split(x, x[group_by]) out <- lapply(names(xlist), function(group) { dens <- .estimate_density_df(x = xlist[[group]], method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, ...) dens$Group <- group dens }) out <- do.call(rbind, out) } class(out) <- .set_density_df_class(out) out } .estimate_density_df <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, ...) { x <- .select_nums(x) out <- sapply(x, estimate_density, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, simplify = FALSE) for (i in names(out)) { out[[i]]$Parameter <- i } out <- do.call(rbind, out) row.names(out) <- NULL out[, c("Parameter", "x", "y")] } #' @export estimate_density.grouped_df <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, ...) 
{ groups <- .group_vars(x) ungrouped_x <- as.data.frame(x) xlist <- split(ungrouped_x, ungrouped_x[groups]) out <- lapply(names(xlist), function(group) { dens <- estimate_density(xlist[[group]], method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci) dens$Group <- group dens }) do.call(rbind, out) } #' @export estimate_density.emmGrid <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) { x <- insight::get_parameters(x) out <- estimate_density(x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) class(out) <- .set_density_class(out) out } #' @export estimate_density.emm_list <- estimate_density.emmGrid #' @export estimate_density.stanreg <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- estimate_density(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.stanfit <- estimate_density.stanreg #' @export estimate_density.blavaan <- estimate_density.stanreg #' @export estimate_density.brmsfit <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- estimate_density(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.MCMCglmm <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) { nF <- x$Fixed$nfl out <- estimate_density(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.mcmc <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) { out <- estimate_density(insight::get_parameters(x, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) 
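  # Keep the name of the original object as an attribute, so that downstream
  # print/plot methods can retrieve which model the densities came from.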
attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.bayesQR <- estimate_density.mcmc #' @export estimate_density.blrm <- estimate_density.mcmc #' @export estimate_density.bcplm <- estimate_density.mcmc #' @export estimate_density.BGGM <- estimate_density.mcmc #' @export estimate_density.mcmc.list <- estimate_density.mcmc #' @export estimate_density.bamlss <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", component = c("all", "conditional", "location"), parameters = NULL, ...) { component <- match.arg(component) out <- estimate_density(insight::get_parameters(x, component = component, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- .set_density_class(out) out } #' Coerce to a Data Frame #' #' @inheritParams base::as.data.frame #' @method as.data.frame density #' @export as.data.frame.density <- function(x, ...) { data.frame(x = x$x, y = x$y) } #' Density Probability at a Given Value #' #' Compute the density value at a given point of a distribution (i.e., the value of the `y` axis of a value `x` of a distribution). #' #' @param posterior Vector representing a posterior distribution. #' @param x The value of which to get the approximate probability. #' @inheritParams estimate_density #' #' @examples #' library(bayestestR) #' posterior <- distribution_normal(n = 10) #' density_at(posterior, 0) #' density_at(posterior, c(0, 1)) #' @export density_at <- function(posterior, x, precision = 2^10, method = "kernel", ...) { density <- estimate_density(posterior, precision = precision, method = method, ...) stats::approx(density$x, density$y, xout = x)$y } # Different functions ----------------------------------------------------- .estimate_density_kernel <- function(x, x_range, precision, bw, ci = 0.95, ...) { # Get the kernel density estimation (KDE) kde <- stats::density(x, n = precision, bw = bw, from = x_range[1], to = x_range[2], ...) df <- as.data.frame(kde) # Get CI (https://bookdown.org/egarpor/NP-UC3M/app-kde-ci.html) if (!is.null(ci)) { h <- kde$bw # Selected bandwidth # R(K) for a normal Rk <- 1 / (2 * sqrt(pi)) # Estimate the SD sd_kde <- sqrt(df$y * Rk / (length(x) * h)) # CI with estimated variance z_alpha <- stats::qnorm(ci) df$CI_low <- df$y - z_alpha * sd_kde df$CI_high <- df$y + z_alpha * sd_kde } df } .estimate_density_logspline <- function(x, x_range, precision, ...) { if (!requireNamespace("logspline")) { if (interactive()) { readline("Package \"logspline\" needed for this function. Press ENTER to install or ESCAPE to abort.") utils::install.packages("logspline") } else { stop("Package \"logspline\" needed for this function. Please run 'install.packages(\"logspline\")'.") } } x_axis <- seq(x_range[1], x_range[2], length.out = precision) y <- logspline::dlogspline(x_axis, logspline::logspline(x, ...), ...) data.frame(x = x_axis, y = y) } .estimate_density_KernSmooth <- function(x, x_range, precision, ...) { if (!requireNamespace("KernSmooth")) { if (interactive()) { readline("Package \"KernSmooth\" needed for this function. Press ENTER to install or ESCAPE to abort.") utils::install.packages("KernSmooth") } else { stop("Package \"KernSmooth\" needed for this function. Please run 'install.packages(\"KernSmooth\")'.") } } as.data.frame(KernSmooth::bkde(x, range.x = x_range, gridsize = precision, truncate = TRUE, ...)) } .estimate_density_mixture <- function(x, x_range, precision, ...) { if (!requireNamespace("mclust")) { if (interactive()) { readline("Package \"mclust\" needed for this function. Press ENTER to install or ESCAPE to abort.") utils::install.packages("mclust") } else { stop("Package \"mclust\" needed for this function. Please run 'install.packages(\"mclust\")'.") } } x_axis <- seq(x_range[1], x_range[2], length.out = precision) y <- stats::predict(mclust::densityMclust(x, verbose = FALSE, ...), newdata = x_axis, ...) data.frame(x = x_axis, y = y) } # helper ---------------------------------------------------------- .set_density_df_class <- function(out) { setdiff(unique(c("estimate_density_df", "see_estimate_density_df", class(out))), c("estimate_density", "see_estimate_density")) } .set_density_class <- function(out) { setdiff(unique(c("estimate_density", "see_estimate_density", class(out))), c("estimate_density_df", "see_estimate_density_df")) } bayestestR/R/convert_bayesian_to_frequentist.R0000644000175000017500000000367614101116425021552 0ustar nileshnilesh#' Convert (refit) a Bayesian model to frequentist #' #' Refit Bayesian model as frequentist. Can be useful for comparisons. #' #' @param model A Bayesian model. #' @param data Data used by the model. If `NULL`, will try to extract it #' from the model. #' @examples #' \donttest{ #' # Rstanarm ---------------------- #' if (require("rstanarm")) { #' # Simple regressions #' model <- stan_glm(Sepal.Length ~ Species, #' data = iris, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' } #' } #' #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm(vs ~ mpg, #' family = "binomial", #' data = mtcars, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' #' # Mixed models #' model <- stan_glmer(Sepal.Length ~ Petal.Length + (1 | Species), #' data = iris, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' #' model <- stan_glmer(vs ~ mpg + (1 | cyl), #' family = "binomial", #' data = mtcars, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' } #' } #' #' @export convert_bayesian_as_frequentist <- function(model, data = NULL) { if (is.null(data)) { data <- insight::get_data(model) } # info info <- insight::model_info(model) # Call called <- model$call # fun <- as.character(called)[1] formula <- called$formula family <- called$family if (info$is_mixed) { insight::check_if_installed("lme4") if (info$is_linear) { freq <- lme4::lmer(formula, data = data) } else { freq <- lme4::glmer(formula, data = data, family = family) } } else { if (info$is_linear) { freq <- stats::lm(formula, data = data) } else { freq <- stats::glm(formula, data = data, family = family) } } freq } #' @rdname convert_bayesian_as_frequentist #' @export bayesian_as_frequentist <- convert_bayesian_as_frequentist bayestestR/R/utils_hdi_ci.R0000644000175000017500000000506414004027567015526 0ustar nileshnilesh#' @keywords internal .check_ci_argument <- function(x, ci, verbose = TRUE) { if (ci > 1) { if (verbose) { warning("`ci` should be less than 1, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } if (ci == 1) { return(data.frame( "CI" = ci, "CI_low" = min(x, na.rm = TRUE), "CI_high" = max(x, na.rm = TRUE) )) } if (anyNA(x)) { if (verbose) { warning("The posterior contains NAs, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" 
= NA, "CI_high" = NA )) } if (length(x) < 3) { if (verbose) { warning("The posterior is too short, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } NULL } #' @keywords internal .compute_interval_dataframe <- function(x, ci, verbose, fun) { numeric_variables <- sapply(x, is.numeric, simplify = TRUE) out <- .compact_list(lapply( x[, numeric_variables, drop = FALSE], get(fun, asNamespace("bayestestR")), ci = ci, verbose = verbose )) dat <- data.frame( Parameter = rep(names(out), each = length(ci)), do.call(rbind, out), stringsAsFactors = FALSE, row.names = NULL ) class(dat) <- unique(c(paste0("bayestestR_", fun), paste0("see_", fun), class(dat))) dat } #' @keywords internal .compute_interval_simMerMod <- function(x, ci, effects, parameters, verbose, fun) { fixed <- fixed.data <- NULL random <- random.data <- NULL if (effects %in% c("fixed", "all")) { fixed.data <- insight::get_parameters(x, effects = "fixed", parameters = parameters) fixed <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) fixed$Group <- "fixed" } if (effects %in% c("random", "all")) { random.data <- insight::get_parameters(x, effects = "random", parameters = parameters) random <- .compute_interval_dataframe(random.data, ci, verbose, fun) random$Group <- "random" } d <- do.call(rbind, list(fixed, random)) if (length(unique(d$Group)) == 1) { d <- .remove_column(d, "Group") } list(result = d, data = do.call(cbind, .compact_list(list(fixed.data, random.data)))) } #' @keywords internal .compute_interval_sim <- function(x, ci, parameters, verbose, fun) { fixed.data <- insight::get_parameters(x, parameters = parameters) d <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) list(result = d, data = fixed.data) } bayestestR/R/convert_pd_to_p.R0000644000175000017500000000175114101116425016240 0ustar nileshnilesh#' Convert between Probability of Direction (pd) and p-value. #' #' Enables a conversion between Probability of Direction (pd) and p-value. #' #' @param pd A Probability of Direction (pd) value (between 0 and 1). #' @param p A p-value. #' @param direction What type of p-value is requested or provided. Can be #' `"two-sided"` (default, two tailed) or `"one-sided"` (one tailed). #' @param ... Arguments passed to or from other methods. #' #' @examples #' pd_to_p(pd = 0.95) #' pd_to_p(pd = 0.95, direction = "one-sided") #' @export pd_to_p <- function(pd, direction = "two-sided", ...) { p <- 1 - pmax(pd, 1 - pd) if (.get_direction(direction) == 0) { p <- 2 * p } p } #' @rdname pd_to_p #' @export p_to_pd <- function(p, direction = "two-sided", ...) { if (.get_direction(direction) == 0) { p <- p / 2 } (1 - p) } #' @rdname pd_to_p #' @export convert_p_to_pd <- p_to_pd #' @rdname pd_to_p #' @export convert_pd_to_p <- pd_to_p bayestestR/R/sexit.R0000644000175000017500000003337014101116425014212 0ustar nileshnilesh#' Sequential Effect eXistence and sIgnificance Testing (SEXIT) #' #' SEXIT is a new framework to describe Bayesian effects, guiding which #' indices to use. Accordingly, the `sexit()` function returns the minimal (and #' optimal) required information to describe models' parameters under a Bayesian #' framework. It includes the following indices: #' \itemize{ #' \item{Centrality: the median of the posterior distribution. In #' probabilistic terms, there is a `50%` probability that the effect is higher, #' and a `50%` probability that it is lower. See [`point_estimate()`][point_estimate].} #' \item{Uncertainty: the `95%` Highest Density Interval (HDI). In #' probabilistic terms, there is a `95%` probability that the effect lies #' within this interval. See [`ci()`][ci].} #' \item{Existence: The probability of direction quantifies the #' certainty with which an effect is positive or negative. It is a critical #' index to show that an effect of some manipulation is not harmful (for #' instance in clinical studies) or to assess the direction of a link. See #' [`p_direction()`][p_direction].} #' \item{Significance: Once existence is demonstrated with high certainty, we #' can assess whether the effect is of sufficient size to be considered #' significant (i.e., not negligible). This is a useful index to determine #' which effects are actually important and worthy of discussion in a given #' process. See [`p_significance()`][p_significance].} #' \item{Size: Finally, this index gives an idea about the strength of an #' effect. However, beware, as studies have shown that a big effect size can #' also be suggestive of low statistical power (see details section).} #' } #' #' @inheritParams p_direction #' @inheritParams hdi #' @param significant,large The threshold values to use for significant and #' large probabilities. If left to 'default', will be selected through #' [`sexit_thresholds()`][sexit_thresholds]. See the details section below. #' #' @details #' #' \subsection{Rationale}{ #' The assessment of "significance" (in its broadest meaning) is a pervasive #' issue in science, and its historical index, the p-value, has been strongly #' criticized and deemed to have played an important role in the replicability #' crisis. In reaction, more and more scientists have turned to Bayesian methods, #' offering an alternative set of tools to answer their questions. However, the #' Bayesian framework offers a wide variety of possible indices related to #' "significance", and the debate has been raging about which index is the best, #' and which one to report. #' #' This situation can lead to the mindless reporting of all possible indices #' (in the hope that the reader will be satisfied), but often #' without the writer understanding or interpreting them. It is indeed #' complicated to juggle many indices with complicated definitions and #' subtle differences. #' #' SEXIT aims to offer a practical framework for Bayesian effects reporting, #' in which the focus is put on intuitiveness, explicitness and usefulness of #' the indices' interpretation. To that end, we suggest a system of description #' of parameters that would be intuitive, easy to learn and apply, #' mathematically accurate and useful for decision-making. #' #' Once the thresholds for significance (i.e., the ROPE) and the one for a #' "large" effect are explicitly defined, the SEXIT framework does not make any #' interpretation, i.e., it does not label the effects, but just sequentially #' gives 3 probabilities (of direction, of significance and of being large, #' respectively) as-is on top of the characteristics of the posterior (using the #' median and HDI for centrality and uncertainty description). Thus, it provides #' a lot of information about the posterior distribution (through the mass of #' different 'sections' of the posterior) in a clear and meaningful way. #' } #' #' \subsection{Threshold selection}{ #' One of the most important things about the SEXIT framework is that it relies #' on two "arbitrary" thresholds (i.e., that have no absolute meaning). They #' are the ones related to effect size (an inherently subjective notion), #' namely the thresholds for significant and large effects. They are set, by #' default, to `0.05` and `0.3` of the standard deviation of the outcome #' variable (tiny and large effect sizes for correlations according to Funder #' \& Ozer, 2019). However, these defaults were chosen for lack of a better #' option, and might not be suited to your case. Thus, they are to be handled #' with care, and the chosen thresholds should always be explicitly reported #' and justified. #' \itemize{ #' \item For **linear models (lm)**, this can be generalised to \ifelse{html}{\out{0.05 * SDy}}{\eqn{[0.05*SD_{y}]}} and \ifelse{html}{\out{0.3 * SDy}}{\eqn{[0.3*SD_{y}]}} for significant and large effects, respectively. #' \item For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{&pi;/&radic;(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in thresholds of `0.09` and `0.54`. #' \item For other models with **binary outcome**, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models. #' \item For models of **count data**, the residual variance is used. This is a rather experimental threshold and is probably often similar to `0.05` and `0.3`, but should be used with care! #' \item For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above). #' \item For **correlations**, `0.05` and `0.3` are used. #' \item For all other models, `0.05` and `0.3` are used, but it is strongly advised to specify them manually. #' } #' } #' \subsection{Examples}{ #' The three values for existence, significance and size provide a useful description of the posterior distribution of the effects. Some possible scenarios include: #' \itemize{ #' \item{The probability of existence is low, but the probability of being large is high: it suggests that the posterior is very wide (covering large territories on both sides of 0). The statistical power might be too low, which should preclude any confident conclusion.} #' \item{The probability of existence and significance is high, but the probability of being large is very small: it suggests that the effect is, with high confidence, not large (the posterior is mostly contained between the significance and the large thresholds).} #' \item{The 3 indices are very low: this suggests that the effect is null with high confidence (the posterior is closely centred around 0).}}} #' #' @return A data frame, with the text description as an attribute. #' #' @references \itemize{ #' \item{Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. \doi{10.21105/joss.01541}} #' \item{Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 
\doi{10.3389/fpsyg.2019.02767}} #' } #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' s <- sexit(rnorm(1000, -1, 1)) #' s #' print(s, summary = TRUE) #' #' s <- sexit(iris) #' s #' print(s, summary = TRUE) #' #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt * cyl, #' data = mtcars, #' iter = 400, refresh = 0 #' ) #' s <- sexit(model) #' s #' print(s, summary = TRUE) #' } #' } #' @export sexit <- function(x, significant = "default", large = "default", ci = 0.95, ...) { thresholds <- .sexit_preprocess(x, significant, large, ...) significant <- thresholds$significant large <- thresholds$large thresholds_text <- thresholds$text # Description centrality <- point_estimate(x, "median") centrality$Effects <- centrality$Component <- NULL centrality_text <- paste0("Median = ", insight::format_value(centrality$Median)) direction <- ifelse(centrality$Median < 0, "negative", "positive") uncertainty <- ci(x, ci = ci, method = "HDI", ...)[c("CI", "CI_low", "CI_high")] uncertainty_text <- insight::format_ci(uncertainty$CI_low, uncertainty$CI_high, uncertainty$CI) # Indices existence_rez <- as.numeric(p_direction(x, ...)) existence_value <- insight::format_value(existence_rez, as_percent = TRUE) existence_threshold <- ifelse(direction == "negative", "< 0", "> 0") sig_rez <- as.numeric(p_significance(x, threshold = significant, ...)) sig_value <- insight::format_value(sig_rez, as_percent = TRUE) sig_threshold <- ifelse(direction == "negative", -1 * significant, significant) sig_threshold <- paste0(ifelse(direction == "negative", "< ", "> "), insight::format_value(sig_threshold)) large_rez <- as.numeric(p_significance(x, threshold = large, ...)) large_value <- insight::format_value(large_rez, as_percent = TRUE) large_threshold <- ifelse(direction == "negative", -1 * large, large) large_threshold <- paste0(ifelse(direction == "negative", "< ", "> "), insight::format_value(large_threshold)) if ("Parameter" %in% names(centrality)) { parameters <- centrality$Parameter } else { parameters <- "The effect" } text_full <- paste0( parameters, " (", centrality_text, ", ", uncertainty_text, ") has a ", existence_value, " probability of being ", direction, " (", existence_threshold, "), ", sig_value, " of being significant (", sig_threshold, "), and ", large_value, " of being large (", large_threshold, ")" ) text_short <- paste0( parameters, " (", centrality_text, ", ", uncertainty_text, ") has ", existence_value, ", ", sig_value, " and ", large_value, " probability of being ", direction, " (", existence_threshold, "), significant (", sig_threshold, ") and large (", large_threshold, ")" ) out <- cbind( centrality, as.data.frame(uncertainty), data.frame(Direction = existence_rez), data.frame(Significance = sig_rez), data.frame(Large = large_rez) ) # Prepare output attr(out, "sexit_info") <- "Following the Sequential Effect eXistence and sIgnificance Testing (SEXIT) framework, we report the median of the posterior distribution and its 95% CI (Highest Density Interval), along with the probability of direction (pd), the probability of significance and the probability of being large."
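# Attach the remaining metadata (CI method, thresholds, text summaries) as attributes; these are consumed by print.sexit() below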
attr(out, "sexit_ci_method") <- "HDI" attr(out, "sexit_significance") <- significant attr(out, "sexit_large") <- large attr(out, "sexit_textlong") <- text_full attr(out, "sexit_textshort") <- text_short attr(out, "sexit_thresholds") <- thresholds_text pretty_cols <- c( "Median", paste0(insight::format_value(ci * 100, protect_integers = TRUE), "% CI"), "Direction", paste0("Significance (> |", insight::format_value(significant), "|)"), paste0("Large (> |", insight::format_value(large), "|)") ) if ("Parameter" %in% names(out)) pretty_cols <- c("Parameter", pretty_cols) attr(out, "pretty_cols") <- pretty_cols attr(out, "data") <- x class(out) <- unique(c("sexit", "see_sexit", class(out))) out } #' @keywords internal .sexit_preprocess <- function(x, significant = "default", large = "default", ...) { thresholds <- sexit_thresholds(x) if (significant == "default") significant <- thresholds[1] if (large == "default") large <- thresholds[2] suppressWarnings(resp <- tryCatch(insight::get_response(x), error = function(e) NULL)) suppressWarnings(info <- tryCatch(insight::model_info(x), error = function(e) NULL)) if (!is.null(resp) && !is.null(info) && info$is_linear) { sd1 <- significant / stats::sd(resp, na.rm = TRUE) sd2 <- large / stats::sd(resp, na.rm = TRUE) text_sd <- paste0( " (corresponding respectively to ", insight::format_value(sd1), " and ", insight::format_value(sd2), " of the outcome's SD)" ) } else { text_sd <- "" } thresholds <- paste0( "The thresholds beyond which the effect is considered ", "as significant (i.e., non-negligible) and large are |", insight::format_value(significant), "| and |", insight::format_value(large), "|", text_sd, "." ) list(significant = significant, large = large, text = thresholds) } #' @export print.sexit <- function(x, summary = FALSE, digits = 2, ...) { orig_x <- x # Long if (isFALSE(summary)) { insight::print_color(paste0("# ", attributes(x)$sexit_info, " ", attributes(x)$sexit_thresholds, "\n\n"), "blue") text <- attributes(x)$sexit_textlong if (length(text) > 1) text <- paste0(paste0("- ", text), collapse = "\n") insight::print_color(text, "yellow") cat("\n\n") df <- data.frame(Median = x$Median, CI = insight::format_ci(x$CI_low, x$CI_high, NULL)) if ("Parameter" %in% names(x)) { df <- cbind(data.frame(Parameter = x$Parameter), df, x[c("Direction", "Significance", "Large")]) } else { df <- cbind(df, x[c("Direction", "Significance", "Large")]) } names(df) <- attributes(x)$pretty_cols print_data_frame(df, digits = digits, ...) # Short } else { insight::print_color(paste0("# ", attributes(x)$sexit_thresholds, "\n\n"), "blue") text <- attributes(x)$sexit_textshort if (length(text) > 1) text <- paste0(paste0("- ", text), collapse = "\n") cat(text) } invisible(orig_x) } bayestestR/R/distribution.R0000644000175000017500000001552114101116425015573 0ustar nileshnilesh#' Empirical Distributions #' #' Generate a sequence of n-quantiles, i.e., a sample of size `n` with a #' near-perfect distribution. #' #' @param type Can be any of the names from base R's #' [Distributions][stats::Distributions], like `"cauchy"`, `"pois"` or #' `"beta"`. #' @param random Generate near-perfect or random (simple wrappers for the base R #' `r*` functions) distributions. #' @param ... Arguments passed to or from other methods. #' @inheritParams tweedie::rtweedie #' #' @examples #' library(bayestestR) #' x <- distribution(n = 10) #' plot(density(x)) #' #' x <- distribution(type = "gamma", n = 100, shape = 2) #' plot(density(x)) #' @export distribution <- function(type = "normal", ...) 
{ base_r_distributions <- c( "beta", "binom", "binomial", "cauchy", "chisq", "chisquared", "exp", "f", "gamma", "geom", "hyper", "lnorm", "multinom", "nbinom", "normal", "gaussian", "pois", "poisson", "student", "t", "student_t", "unif", "uniform", "weibull" ) switch(match.arg(arg = type, choices = base_r_distributions), "beta" = distribution_beta(...), "binom" = , "binomial" = distribution_binomial(...), "cauchy" = distribution_cauchy(...), "chisq" = , "chisquared" = distribution_chisquared(...), "gamma" = distribution_gamma(...), "gaussian" = , "normal" = distribution_normal(...), "nbinom" = distribution_nbinom(...), "poisson" = distribution_poisson(...), "t" = , "student" = , "student_t" = distribution_student(...), "uniform" = distribution_uniform(...), distribution_custom(type = type, ...) ) } #' @rdname distribution #' @inheritParams distribution #' @export distribution_custom <- function(n, type = "norm", ..., random = FALSE) { if (random) { f <- match.fun(paste0("r", type)) f(n, ...) } else { f <- match.fun(paste0("q", type)) f(seq(1 / n, 1 - 1 / n, length.out = n), ...) } } #' @rdname distribution #' @inheritParams stats::rbeta #' @export distribution_beta <- function(n, shape1, shape2, ncp = 0, random = FALSE, ...) { if (random) { stats::rbeta(n, shape1, shape2, ncp = ncp) } else { stats::qbeta(seq(1 / n, 1 - 1 / n, length.out = n), shape1, shape2, ncp = ncp, ...) } } #' @rdname distribution #' @inheritParams stats::rbinom #' @export distribution_binomial <- function(n, size = 1, prob = 0.5, random = FALSE, ...) { if (random) { stats::rbinom(n, size, prob) } else { stats::qbinom(seq(1 / n, 1 - 1 / n, length.out = n), size, prob, ...) } } #' @rdname distribution #' @export distribution_binom <- distribution_binomial #' @rdname distribution #' @inheritParams stats::rcauchy #' @export distribution_cauchy <- function(n, location = 0, scale = 1, random = FALSE, ...) { if (random) { stats::rcauchy(n, location, scale) } else { stats::qcauchy(seq(1 / n, 1 - 1 / n, length.out = n), location, scale, ...) } } #' @rdname distribution #' @inheritParams stats::rchisq #' @export distribution_chisquared <- function(n, df, ncp = 0, random = FALSE, ...) { if (random) { stats::rchisq(n, df, ncp) } else { stats::qchisq(seq(1 / n, 1 - 1 / n, length.out = n), df, ncp, ...) } } #' @rdname distribution #' @export distribution_chisq <- distribution_chisquared #' @rdname distribution #' @inheritParams stats::rgamma #' @export distribution_gamma <- function(n, shape, scale = 1, random = FALSE, ...) { if (random) { stats::rgamma(n = n, shape = shape, scale = scale) } else { stats::qgamma(p = seq(1 / n, 1 - 1 / n, length.out = n), shape = shape, scale = scale) } } #' @rdname distribution #' @inheritParams stats::rnorm #' @export distribution_mixture_normal <- function(n, mean = c(-3, 3), sd = 1, random = FALSE, ...) { n <- round(n / length(mean)) sd <- c(sd) if (length(sd) != length(mean)) { sd <- rep(sd, length.out = length(mean)) } x <- c() for (i in seq_along(mean)) { x <- c(x, distribution_normal(n = n, mean = mean[i], sd = sd[i], random = random)) } x } #' @rdname distribution #' @inheritParams stats::rnorm #' @export distribution_normal <- function(n, mean = 0, sd = 1, random = FALSE, ...) { if (random) { stats::rnorm(n, mean, sd) } else { stats::qnorm(seq(1 / n, 1 - 1 / n, length.out = n), mean, sd, ...)
} } #' @rdname distribution #' @export distribution_gaussian <- distribution_normal #' @rdname distribution #' @inheritParams stats::rnbinom #' @param phi Corresponding to `glmmTMB`'s implementation of nbinom #' distribution, where `size=mu/phi`. #' @export distribution_nbinom <- function(n, size, prob, mu, phi, random = FALSE, ...) { if (missing(size)) { size <- mu / phi } if (random) { stats::rnbinom(n, size, prob, mu) } else { stats::qnbinom(seq(1 / n, 1 - 1 / n, length.out = n), size, prob, mu, ...) } } #' @rdname distribution #' @inheritParams stats::rpois #' @export distribution_poisson <- function(n, lambda = 1, random = FALSE, ...) { if (random) { stats::rpois(n, lambda) } else { stats::qpois(seq(1 / n, 1 - 1 / n, length.out = n), lambda, ...) } } #' @rdname distribution #' @inheritParams stats::rt #' @export distribution_student <- function(n, df, ncp, random = FALSE, ...) { if (random) { stats::rt(n, df, ncp) } else { stats::qt(seq(1 / n, 1 - 1 / n, length.out = n), df, ncp, ...) } } #' @rdname distribution #' @export distribution_t <- distribution_student #' @rdname distribution #' @export distribution_student_t <- distribution_student #' @rdname distribution #' @inheritParams tweedie::rtweedie #' @export distribution_tweedie <- function(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...) { insight::check_if_installed("tweedie") if (random) { tweedie::rtweedie( n = n, xi = xi, mu = mu, phi = phi, power = power ) } else { tweedie::qtweedie( p = seq(1 / n, 1 - 1 / n, length.out = n), xi = xi, mu = mu, phi = phi, power = power ) } } #' @rdname distribution #' @inheritParams stats::runif #' @export distribution_uniform <- function(n, min = 0, max = 1, random = FALSE, ...) { if (random) { stats::runif(n, min, max) } else { stats::qunif(seq(1 / n, 1 - 1 / n, length.out = n), min, max, ...) } } #' @rdname distribution #' @inheritParams stats::rnorm #' @export rnorm_perfect <- function(n, mean = 0, sd = 1) { .Deprecated("distribution_normal") stats::qnorm(seq(1 / n, 1 - 1 / n, length.out = n), mean, sd) } bayestestR/R/bayesfactor.R0000644000175000017500000000651614101116425015362 0ustar nileshnilesh#' Bayes Factors (BF) #' #' This function computes the Bayes factors (BFs) that are appropriate to the #' input. For vectors or single models, it will compute [`BFs for single #' parameters()`][bayesfactor_parameters], or, if `hypothesis` is specified, #' [`BFs for restricted models()`][bayesfactor_restricted]. For multiple models, #' it will return the BF corresponding to [`comparison between #' models()`][bayesfactor_models] and, if a model comparison is passed, it will #' compute the [`inclusion BF()`][bayesfactor_inclusion]. #' \cr\cr #' For a complete overview of these functions, read the [Bayes factor vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @param ... A numeric vector, model object(s), or the output from #' `bayesfactor_models`. #' @inheritParams bayesfactor_parameters #' @inheritParams bayesfactor_restricted #' @inheritParams bayesfactor_models #' @inheritParams bayesfactor_inclusion #' #' @return Some type of Bayes factor, depending on the input. See [bayesfactor_parameters()], [bayesfactor_models()] or [bayesfactor_inclusion()] #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
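#' \cr\cr
#' Schematically, the dispatch described above can be sketched as follows
#' (object names are purely illustrative):
#' ```r
#' bayesfactor(posterior, prior = prior)                       # bayesfactor_parameters()
#' bayesfactor(posterior, prior = prior, hypothesis = "x > 0") # bayesfactor_restricted()
#' bayesfactor(model1, model2, model3)                         # bayesfactor_models()
#' bayesfactor(model_comparison)                               # bayesfactor_inclusion()
#' ```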
#' #' @examples #' library(bayestestR) #' #' if (require("logspline")) { #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = .5, sd = .3) #' #' bayesfactor(posterior, prior = prior) #' } #' \dontrun{ #' # rstanarm models #' # --------------- #' if (require("rstanarm")) { #' model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) #' bayesfactor(model) #' } #' } #' #' if (require("logspline")) { #' # Frequentist models #' # --------------- #' m0 <- lm(extra ~ 1, data = sleep) #' m1 <- lm(extra ~ group, data = sleep) #' m2 <- lm(extra ~ group + ID, data = sleep) #' #' comparison <- bayesfactor(m0, m1, m2) #' comparison #' #' bayesfactor(comparison) #' } #' @export bayesfactor <- function(..., prior = NULL, direction = "two-sided", null = 0, hypothesis = NULL, effects = c("fixed", "random", "all"), verbose = TRUE, denominator = 1, match_models = FALSE, prior_odds = NULL) { mods <- list(...) effects <- match.arg(effects) if (length(mods) > 1) { bayesfactor_models(..., denominator = denominator) } else if (inherits(mods[[1]], "bayesfactor_models")) { bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds) } else if (inherits(mods[[1]], "BFBayesFactor")) { if (inherits(mods[[1]]@numerator[[1]], "BFlinearModel")) { bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds) } else { bayesfactor_models(...) } } else if (!is.null(hypothesis)) { bayesfactor_restricted(..., prior = prior, verbose = verbose, effects = effects ) } else { bayesfactor_parameters( ..., prior = prior, direction = direction, null = null, effects = effects, verbose = verbose ) } } bayestestR/R/bci.R0000644000175000017500000001552614135670530013627 0ustar nileshnilesh#' Bias Corrected and Accelerated Interval (BCa) #' #' Compute the **Bias Corrected and Accelerated Interval (BCa)** of posterior #' distributions. #' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi details #' @inherit hdi seealso #' @family ci #' #' @references #' DiCiccio, T. J. and B. Efron. (1996). Bootstrap Confidence Intervals. #' Statistical Science. 11(3): 189–212. 10.1214/ss/1032280214 #' #' @examples #' posterior <- rnorm(1000) #' bci(posterior) #' bci(posterior, ci = c(.80, .89, .95)) #' @export bci <- function(x, ...) { UseMethod("bci") } #' @rdname bci #' @export bcai <- bci #' @rdname bci #' @export bci.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .bci(x = x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_eti", "see_eti", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @rdname bci #' @export bci.data.frame <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "bci") attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @rdname bci #' @export bci.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export bci.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export bci.bamlss <- function(x, ci = 0.95, component = c("all", "conditional", "location"), verbose = TRUE, ...)
{ component <- match.arg(component) d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export bci.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export bci.bayesQR <- bci.bcplm #' @export bci.blrm <- bci.bcplm #' @export bci.mcmc.list <- bci.bcplm #' @export bci.BGGM <- bci.bcplm #' @rdname bci #' @export bci.sim.merMod <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) dat <- .compute_interval_simMerMod( x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "bci" ) out <- dat$result attr(out, "data") <- dat$data out } #' @rdname bci #' @export bci.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_sim( x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "bci" ) out <- dat$result attr(out, "data") <- dat$data out } #' @rdname bci #' @export bci.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- bci(xdf, ci = ci, verbose = verbose, ...) attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @export bci.emm_list <- bci.emmGrid #' @rdname bci #' @export bci.stanreg <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .prepare_output( bci( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), insight::clean_parameters(x), inherits(x, "stanmvreg") ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export bci.stanfit <- bci.stanreg #' @export bci.blavaan <- bci.stanreg #' @rdname bci #' @export bci.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .prepare_output( bci(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), ci = ci, verbose = verbose, ...), insight::clean_parameters(x) ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname bci #' @export bci.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- bci(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export bci.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { out <- bci(as.data.frame(t(attributes(x)$iterations)), ...)
} else { stop("No iterations present in the output.") } attr(out, "object_name") <- .safe_deparse(substitute(x)) out } # Helper ------------------------------------------------------------------ .bci <- function(x, ci, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } low <- (1 - ci) / 2 high <- 1 - low sims <- length(x) z.inv <- length(x[x < mean(x, na.rm = TRUE)]) / sims z <- stats::qnorm(z.inv) U <- (sims - 1) * (mean(x, na.rm = TRUE) - x) top <- sum(U^3) under <- 6 * (sum(U^2))^1.5 a <- top / under lower.inv <- stats::pnorm(z + (z + stats::qnorm(low)) / (1 - a * (z + stats::qnorm(low)))) lower <- stats::quantile(x, lower.inv, names = FALSE) upper.inv <- stats::pnorm(z + (z + stats::qnorm(high)) / (1 - a * (z + stats::qnorm(high)))) upper <- stats::quantile(x, upper.inv, names = FALSE) data.frame( "CI" = ci, "CI_low" = lower, "CI_high" = upper ) } bayestestR/R/ci.R0000644000175000017500000001626214135670544013470 0ustar nileshnilesh#' Confidence/Credible/Compatibility Interval (CI) #' #' Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals (SI) for Bayesian and frequentist models. The Documentation is accessible for: #' #' \itemize{ #' \item [Bayesian models](https://easystats.github.io/bayestestR/articles/credible_interval.html) #' \item [Frequentist models](https://easystats.github.io/parameters/reference/ci.default.html) #' } #' #' @param x A `stanreg` or `brmsfit` model, or a vector representing a posterior distribution. #' @param method Can be ['ETI'][eti] (default), ['HDI'][hdi], ['BCI'][bci] or ['SI'][si]. #' @param ci Value or vector of probability of the CI (between 0 and 1) #' to be estimated. Default to `.95` (`95%`). #' @inheritParams hdi #' @inheritParams si #' @inherit hdi seealso #' @family ci #' #' @return A data frame with following columns: #' \itemize{ #' \item `Parameter` The model parameter(s), if `x` is a model-object. If `x` is a vector, this column is missing. #' \item `CI` The probability of the credible interval. #' \item `CI_low`, `CI_high` The lower and upper credible interval limits for the parameters. #' } #' #' @note When it comes to interpretation, we recommend thinking of the CI in terms of #' an "uncertainty" or "compatibility" interval, the latter being defined as #' \dQuote{Given any value in the interval and the background assumptions, #' the data should not seem very surprising} (\cite{Gelman & Greenland 2019}). #' \cr \cr #' There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ 2019;l5381. 
10.1136/bmj.l5381 #' #' #' @examples #' library(bayestestR) #' #' posterior <- rnorm(1000) #' ci(posterior, method = "ETI") #' ci(posterior, method = "HDI") #' #' df <- data.frame(replicate(4, rnorm(100))) #' ci(df, method = "ETI", ci = c(.80, .89, .95)) #' ci(df, method = "HDI", ci = c(.80, .89, .95)) #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ci(model, method = "ETI", ci = c(.80, .89)) #' ci(model, method = "HDI", ci = c(.80, .89)) #' ci(model, method = "SI") #' } #' #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' ci(model, method = "ETI") #' ci(model, method = "HDI") #' ci(model, method = "SI") #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' ci(bf, method = "ETI") #' ci(bf, method = "HDI") #' } #' #' if (require("emmeans")) { #' model <- emtrends(model, ~1, "wt") #' ci(model, method = "ETI") #' ci(model, method = "HDI") #' ci(model, method = "SI") #' } #' } #' @export ci <- function(x, ...) { UseMethod("ci") } #' @keywords internal .ci_bayesian <- function(x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ...) { if (tolower(method) %in% c("eti", "equal", "ci", "quantile")) { return(eti(x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else if (tolower(method) %in% c("bci", "bca", "bcai")) { return(bci(x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else if (tolower(method) %in% c("hdi")) { return(hdi(x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else if (tolower(method) %in% c("si")) { return(si(x, BF = BF, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else { stop("`method` should be 'ETI' (for equal-tailed interval), 'HDI' (for highest density interval), 'BCI' (for bias corrected and accelerated bootstrap intervals) or 'SI' (for support interval).") } } #' @rdname ci #' @export ci.numeric <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) { .ci_bayesian(x, ci = ci, method = method, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @export ci.data.frame <- ci.numeric #' @export ci.emmGrid <- function(x, ci = NULL, ...) { if (!.is_baysian_emmeans(x)) { if (!requireNamespace("parameters", quietly = TRUE)) { stop("'parameters' required for this function to work.") } if (is.null(ci)) ci <- 0.95 return(parameters::ci(model = x, ci = ci, ...)) } if (is.null(ci)) ci <- 0.95 x <- insight::get_parameters(x) ci(x, ci = ci, ...) } #' @export ci.emm_list <- ci.emmGrid #' @rdname ci #' @export ci.sim.merMod <- function(x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { .ci_bayesian(x, ci = ci, method = method, effects = effects, parameters = parameters, verbose = verbose, ...) } #' @rdname ci #' @export ci.sim <- function(x, ci = 0.95, method = "ETI", parameters = NULL, verbose = TRUE, ...) { .ci_bayesian(x, ci = ci, method = method, parameters = parameters, verbose = verbose, ...)
} #' @rdname ci #' @export ci.stanreg <- function(x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, BF = 1, ...) { .ci_bayesian(x, ci = ci, method = method, effects = effects, component = component, parameters = parameters, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @export ci.brmsfit <- function(x, ci = 0.95, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ...) { .ci_bayesian(x, ci = ci, method = method, effects = effects, component = component, parameters = parameters, verbose = verbose, BF = BF, ...) } #' @export ci.stanfit <- ci.stanreg #' @export ci.blavaan <- ci.stanreg #' @rdname ci #' @export ci.BFBayesFactor <- ci.numeric #' @rdname ci #' @export ci.MCMCglmm <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, ...) { nF <- x$Fixed$nfl ci(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), ci = ci, method = method, verbose = verbose, ...) } #' @export ci.bamlss <- function(x, ci = 0.95, method = "ETI", component = c("all", "conditional", "location"), verbose = TRUE, ...) { component <- match.arg(component) ci(insight::get_parameters(x, component = component), ci = ci, method = method, verbose = verbose, ...) } #' @export ci.bcplm <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, ...) { ci(insight::get_parameters(x), ci = ci, method = method, verbose = verbose, ...) } #' @export ci.blrm <- ci.bcplm #' @export ci.mcmc <- ci.bcplm #' @export ci.mcmc.list <- ci.bcplm #' @export ci.BGGM <- ci.bcplm #' @export ci.get_predicted <- ci.data.frame bayestestR/R/utils.R0000644000175000017500000001507014076521542014226 0ustar nileshnilesh# trim leading / trailing whitespace .trim <- function(x) gsub("^\\s+|\\s+$", "", x) # safe deparse, also for very long strings .safe_deparse <- function(string) { paste0(sapply(deparse(string, width.cutoff = 500), .trim, simplify = TRUE), collapse = "") } # does the object have an element with the given name? #' @keywords internal .obj_has_name <- function(x, name) { name %in% names(x) } # remove NULL elements from lists #' @keywords internal .compact_list <- function(x) x[!sapply(x, function(i) length(i) == 0 || is.null(i) || any(i == "NULL", na.rm = TRUE))] # is object empty?
#' @keywords internal .is_empty_object <- function(x) { if (is.list(x)) { x <- tryCatch( { .compact_list(x) }, error = function(x) { x } ) } # this is an ugly fix because of ugly tibbles if (inherits(x, c("tbl_df", "tbl"))) x <- as.data.frame(x) x <- suppressWarnings(x[!is.na(x)]) length(x) == 0 || is.null(x) } # select rows where values in "variable" match "value" #' @keywords internal .select_rows <- function(data, variable, value) { data[which(data[[variable]] == value), ] } # remove column #' @keywords internal .remove_column <- function(data, variables) { data[variables] <- NULL data } #' @keywords internal .to_long <- function(x, names_to = "key", values_to = "value", columns = colnames(x)) { if (is.numeric(columns)) columns <- colnames(x)[columns] dat <- stats::reshape( as.data.frame(x), idvar = "id", ids = row.names(x), times = columns, timevar = names_to, v.names = values_to, varying = list(columns), direction = "long" ) if (is.factor(dat[[values_to]])) { dat[[values_to]] <- as.character(dat[[values_to]]) } dat[, 1:(ncol(dat) - 1), drop = FALSE] } #' select numerics columns #' @keywords internal .select_nums <- function(x) { x[unlist(lapply(x, is.numeric))] } ## TODO remove?!? # #' Used in describe_posterior # #' @keywords internal # .reorder_rows <- function(x, out, ci = NULL) { # if (!is.data.frame(out) || nrow(out) == 1) { # return(out) # } # # if (is.null(ci)) { # refdata <- point_estimate(x, centrality = "median", dispersion = FALSE) # order <- refdata$Parameter # out <- out[match(order, out$Parameter), ] # } else { # uncertainty <- ci(x, ci = ci) # order <- paste0(uncertainty$Parameter, uncertainty$CI) # out <- out[match(order, paste0(out$Parameter, out$CI)), ] # } # rownames(out) <- NULL # out # } #' @keywords internal .get_direction <- function(direction) { if (length(direction) > 1) warning("Using first 'direction' value.") if (is.numeric(direction[1])) { return(sign(direction[1])) } Value <- c( "left" = -1, "right" = 1, "two-sided" = 0, "twosided" = 0, "one-sided" = 1, "onesided" = 1, "<" = -1, ">" = 1, "=" = 0, "==" = 0, "-1" = -1, "0" = 0, "1" = 1, "+1" = 1 ) direction <- Value[tolower(direction[1])] if (is.na(direction)) { stop("Unrecognized 'direction' argument.") } direction } #' @keywords internal .prepare_output <- function(temp, cleaned_parameters, is_stan_mv = FALSE, is_brms_mv = FALSE) { if (isTRUE(is_stan_mv)) { temp$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", temp$Parameter) for (i in unique(temp$Response)) { temp$Parameter <- gsub(sprintf("%s|", i), "", temp$Parameter, fixed = TRUE) } merge_by <- c("Parameter", "Effects", "Component", "Response") remove_cols <- c("Group", "Cleaned_Parameter", "Function", ".roworder") } else if (isTRUE(is_brms_mv)) { temp$Response <- gsub("(.*)_(.*)_(.*)", "\\2", temp$Parameter) # temp$Parameter <- gsub("(.*)_(.*)_(.*)", "\\1_\\3", temp$Parameter) merge_by <- c("Parameter", "Effects", "Component", "Response") remove_cols <- c("Group", "Cleaned_Parameter", "Function", ".roworder") } else { merge_by <- c("Parameter", "Effects", "Component") remove_cols <- c("Group", "Cleaned_Parameter", "Response", "Function", ".roworder") } merge_by <- intersect(merge_by, colnames(temp)) temp$.roworder <- 1:nrow(temp) out <- merge(x = temp, y = cleaned_parameters, by = merge_by, all.x = TRUE) # hope this works for stanmvreg... 
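# for multivariate-response models, if merging did not yield Effects/Component columns, fill them from the cleaned parameters (assumes matching row order)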
if ((isTRUE(is_stan_mv) || isTRUE(is_brms_mv)) && all(is.na(out$Effects)) && all(is.na(out$Component))) { out$Effects <- cleaned_parameters$Effects[1:nrow(out)] out$Component <- cleaned_parameters$Component[1:nrow(out)] } # this here is required for multiple response models... if (all(is.na(out$Effects)) || all(is.na(out$Component))) { out <- out[!duplicated(out$.roworder), ] } else { out <- out[!is.na(out$Effects) & !is.na(out$Component) & !duplicated(out$.roworder), ] } attr(out, "Cleaned_Parameter") <- out$Cleaned_Parameter[order(out$.roworder)] .remove_column(out[order(out$.roworder), ], remove_cols) } #' @keywords internal .merge_and_sort <- function(x, y, by, all) { if (is.null(ncol(y))) { return(x) } x$.rowid <- 1:nrow(x) x <- merge(x, y, by = by, all = all) .remove_column(x[order(x$.rowid), ], ".rowid") } # returns the row-indices for grouped data frames #' @keywords internal .group_indices <- function(x) { # dplyr < 0.8.0 returns attribute "indices" grps <- attr(x, "groups", exact = TRUE) # dplyr < 0.8.0? if (is.null(grps)) { attr(x, "indices", exact = TRUE) } else { grps[[".rows"]] } } # returns the variables that were used for grouping data frames (dplyr::group_var()) #' @keywords internal .group_vars <- function(x) { # dplyr < 0.8.0 returns attribute "indices" grps <- attr(x, "groups", exact = TRUE) # dplyr < 0.8.0? if (is.null(grps)) { ## TODO fix for dplyr < 0.8 attr(x, "vars", exact = TRUE) } else { setdiff(colnames(grps), ".rows") } } #' @keywords internal .is_baysian_emmeans <- function(x) { if (inherits(x, "emm_list")) { x <- x[[1]] } post.beta <- methods::slot(x, "post.beta") !(all(dim(post.beta) == 1) && is.na(post.beta)) } # safe add cleaned parameter names to a model object .add_clean_parameters_attribute <- function(params, model) { cp <- tryCatch( { insight::clean_parameters(model) }, error = function(e) { NULL } ) attr(params, "clean_parameters") <- cp params } .n_unique <- function(x, na.rm = TRUE) { if (is.null(x)) { return(0) } if (isTRUE(na.rm)) x <- stats::na.omit(x) length(unique(x)) } bayestestR/R/utils_clean_stan_parameters.R0000644000175000017500000000140413470405141020625 0ustar nileshnilesh#' @keywords internal .clean_up_tmp_stanreg <- function(tmp, group, cols, parms) { tmp$Group <- group tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", tmp$Parameter) tmp } #' @keywords internal .clean_up_tmp_brms <- function(tmp, group, component, cols, parms) { tmp$Group <- group tmp$Component <- component tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", tmp$Parameter) tmp } bayestestR/R/simulate_simpson.R0000644000175000017500000000311514101116425016443 0ustar nileshnilesh#' Simpson's paradox dataset simulation #' #' Simpson's paradox, or the Yule-Simpson effect, is a phenomenon in probability #' and statistics, in which a trend appears in several different groups of data #' but disappears or reverses when these groups are combined. #' #' @param n The number of observations for each group to be generated (minimum #' 4). #' @param groups Number of groups (groups can be participants, clusters, #' anything). #' @param difference Difference between groups. 
#' @param group_prefix The prefix of the group name (e.g., "G_1", "G_2", "G_3", ...). #' @inheritParams simulate_correlation #' #' @return A dataset. #' #' @examples #' data <- simulate_simpson(n = 10, groups = 5, r = 0.5) #' #' if (require("ggplot2")) { #' ggplot(data, aes(x = V1, y = V2)) + #' geom_point(aes(color = Group)) + #' geom_smooth(aes(color = Group), method = "lm") + #' geom_smooth(method = "lm") #' } #' @export simulate_simpson <- function(n = 100, r = 0.5, groups = 3, difference = 1, group_prefix = "G_") { if (n <= 3) { stop("The number of observations `n` should be higher than 3.") } data <- data.frame() for (i in 1:groups) { dat <- simulate_correlation(n = n, r = r) dat$V1 <- dat$V1 + difference * i # (i * -sign(r)) dat$V2 <- dat$V2 + difference * (i * -sign(r)) dat$Group <- sprintf(paste0(group_prefix, "%0", nchar(trunc(abs(groups))), "d"), i) data <- rbind(data, dat) } data } bayestestR/R/bic_to_bf.R0000644000175000017500000000151714101116425014762 0ustar nileshnilesh#' Convert BIC indices to Bayes Factors via the BIC-approximation method. #' #' @param bic A vector of BIC values. #' @param denominator The BIC value to use as a denominator (to test against). #' @param log Return the `log(BF)`? #' #' @examples #' bic1 <- BIC(lm(Sepal.Length ~ 1, data = iris)) #' bic2 <- BIC(lm(Sepal.Length ~ Species, data = iris)) #' bic3 <- BIC(lm(Sepal.Length ~ Species + Petal.Length, data = iris)) #' bic4 <- BIC(lm(Sepal.Length ~ Species * Petal.Length, data = iris)) #' #' bic_to_bf(c(bic1, bic2, bic3, bic4), denominator = bic1) #' @return The Bayes Factors corresponding to the BIC values against the denominator. #' #' @export bic_to_bf <- function(bic, denominator, log = FALSE) { delta <- (bic - denominator) / (-2) if (log) { return(delta) } else { return(exp(delta)) } } bayestestR/R/model_to_priors.R0000644000175000017500000000317014106656140016261 0ustar nileshnilesh#' Convert model's posteriors to priors (EXPERIMENTAL) #' #' Convert model's posteriors to (normal) priors. #' #' @param model A Bayesian model. #' @param scale_multiply The SD of the posterior will be multiplied by this amount before being set as a prior to avoid overly narrow priors. #' @param ... Other arguments for \code{insight::get_prior()} or \code{\link{describe_posterior}}. #' #' @examples #' \dontrun{ #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' formula <- brms::brmsformula(mpg ~ wt + cyl, center = FALSE) #' #' model <- brms::brm(formula, data = mtcars, refresh = 0) #' priors <- model_to_priors(model) #' priors <- brms::validate_prior(priors, formula, data = mtcars) #' priors #' #' model2 <- brms::brm(formula, data = mtcars, prior = priors, refresh = 0) #' } #' } #' @export model_to_priors <- function(model, scale_multiply = 3, ...) { UseMethod("model_to_priors") } #' @export model_to_priors.brmsfit <- function(model, scale_multiply = 3, ...) { params <- describe_posterior(model, centrality = "mean", dispersion = TRUE, ci = NULL, test = NULL, ...)
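# Retrieve the parameters the priors refer to, and the model's current prior specification, which is overwritten below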
priors_params <- attributes(insight::get_priors(model, ...))$priors priors <- brms::prior_summary(model) for (p in priors_params$Parameter) { if (p %in% params$Parameter) { subset <- params[params$Parameter == p, ] priors$prior[priors_params$Parameter == p] <- paste0( "normal(", insight::format_value(subset$Mean), ", ", insight::format_value(subset$SD * scale_multiply), ")") } } priors } bayestestR/R/p_significance.R0000644000175000017500000002157314101116425016021 0ustar nileshnilesh#' Practical Significance (ps) #' #' Compute the probability of **Practical Significance** (***ps***), which can be conceptualized as a unidirectional equivalence test. It returns the probability that an effect is above a given threshold corresponding to a negligible effect, in the median's direction. Mathematically, it is defined as the proportion of the posterior distribution that falls beyond the threshold in the direction of the median. #' #' @inheritParams rope #' @param threshold The threshold value that separates a significant from a negligible effect. If `"default"`, the range is set to `0.1` if input is a vector, and based on [`rope_range()`][rope_range] if a Bayesian model is provided. #' #' @return Values between 0 and 1 corresponding to the probability of practical significance (ps). #' #' @details `p_significance()` returns the proportion of a probability #' distribution (`x`) that is outside a certain range (the negligible #' effect, or ROPE, see argument `threshold`). If there are values of the #' distribution both below and above the ROPE, `p_significance()` returns #' the higher probability of a value being outside the ROPE. Typically, this #' value should be larger than 0.5 to indicate practical significance. However, #' if the range of the negligible effect is rather large compared to the #' range of the probability distribution `x`, `p_significance()` #' will be less than 0.5, which indicates no clear practical significance. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_significance(posterior) #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_significance(df) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_significance(model) #' } #' } #' @export p_significance <- function(x, ...) { UseMethod("p_significance") } #' @rdname p_significance #' @export p_significance.numeric <- function(x, threshold = "default", ...) { threshold <- .select_threshold_ps(threshold = threshold) psig <- max( c( length(x[x > abs(threshold)]) / length(x), # ps positive length(x[x < -abs(threshold)]) / length(x) # ps negative ) ) attr(psig, "threshold") <- threshold attr(psig, "data") <- x class(psig) <- unique(c("p_significance", "see_p_significance", class(psig))) psig } #' @export p_significance.data.frame <- function(x, threshold = "default", ...)
{ obj_name <- .safe_deparse(substitute(x)) threshold <- .select_threshold_ps(threshold = threshold) x <- .select_nums(x) if (ncol(x) == 1) { ps <- p_significance(x[, 1], threshold = threshold, ...) } else { ps <- sapply(x, p_significance, threshold = threshold, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "ps" = as.numeric(ps), row.names = NULL, stringsAsFactors = FALSE ) attr(out, "threshold") <- threshold attr(out, "object_name") <- obj_name class(out) <- unique(c("p_significance", "see_p_significance", class(out))) out } #' @export p_significance.parameters_simulate_model <- function(x, threshold = "default", ...) { obj_name <- attr(x, "object_name") if (!is.null(obj_name)) { # first try, parent frame model <- tryCatch( { get(obj_name, envir = parent.frame()) }, error = function(e) { NULL } ) if (is.null(model)) { # second try, global env model <- tryCatch( { get(obj_name, envir = globalenv()) }, error = function(e) { NULL } ) } } threshold <- .select_threshold_ps(model = model, threshold = threshold) out <- p_significance.data.frame(x, threshold = threshold) attr(out, "object_name") <- obj_name out } #' @export p_significance.MCMCglmm <- function(x, threshold = "default", ...) { nF <- x$Fixed$nfl out <- p_significance(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), threshold = threshold, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_significance.BFBayesFactor <- function(x, threshold = "default", ...) { out <- p_significance(insight::get_parameters(x), threshold = threshold, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_significance.mcmc <- function(x, threshold = "default", ...) { p_significance(as.data.frame(x), threshold = threshold, ...) } #' @export p_significance.bamlss <- function(x, threshold = "default", component = c("all", "conditional", "location"), ...) { out <- p_significance(insight::get_parameters(x, component = component), threshold = threshold, ...) out <- .add_clean_parameters_attribute(out, x) out } #' @export p_significance.bcplm <- function(x, threshold = "default", ...) { p_significance(insight::get_parameters(x), threshold = threshold, ...) } #' @export p_significance.mcmc.list <- p_significance.bcplm #' @export p_significance.bayesQR <- p_significance.bcplm #' @export p_significance.blrm <- p_significance.bcplm #' @export p_significance.BGGM <- p_significance.bcplm #' @rdname p_significance #' @export p_significance.emmGrid <- function(x, threshold = "default", ...) { xdf <- insight::get_parameters(x) out <- p_significance(xdf, threshold = threshold, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_significance.emm_list <- p_significance.emmGrid #' @rdname p_significance #' @export p_significance.stanreg <- function(x, threshold = "default", effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) 
{ effects <- match.arg(effects) component <- match.arg(component) threshold <- .select_threshold_ps(model = x, threshold = threshold) data <- p_significance( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), threshold = threshold ) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output(data, cleaned_parameters, inherits(x, "stanmvreg")) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "threshold") <- threshold attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(data) out } #' @export p_significance.stanfit <- p_significance.stanreg #' @export p_significance.blavaan <- p_significance.stanreg #' @rdname p_significance #' @export p_significance.brmsfit <- function(x, threshold = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) threshold <- .select_threshold_ps(model = x, threshold = threshold) data <- p_significance( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), threshold = threshold ) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output(data, cleaned_parameters) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "threshold") <- threshold attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(data) out } #' @rdname as.numeric.p_direction #' @export as.numeric.p_significance <- function(x, ...) { if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$ps))) } else { return(as.vector(x)) } } #' @method as.double p_significance #' @export as.double.p_significance <- as.numeric.p_significance #' @keywords internal .select_threshold_ps <- function(model = NULL, threshold = "default") { # If a range is passed if (length(threshold) > 1) { if (length(unique(abs(threshold))) == 1) { # If symmetric range threshold <- abs(threshold[2]) } else { stop("`threshold` should be 'default' or a numeric value (e.g., 0.1).") } } # If default if (all(threshold == "default")) { if (!is.null(model)) { threshold <- rope_range(model)[2] } else { threshold <- 0.1 } } else if (!all(is.numeric(threshold))) { stop("`threshold` should be 'default' or a numeric value (e.g., 0.1).") } threshold } bayestestR/R/utils_print_data_frame.R0000644000175000017500000000547714023526535017616 0ustar nileshnileshprint_data_frame <- function(x, digits) { out <- list(x) names(out) <- "fixed" if (all(c("Effects", "Component") %in% colnames(x))) { x$split <- sprintf("%s_%s", x$Effects, x$Component) } else if ("Effects" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Effects")] <- "split" } else if ("Component" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Component")] <- "split" } if ("split" %in% colnames(x)) { if (anyNA(x$split)) { x$split[is.na(x$split)] <- "{other}" } out <- lapply(split(x, f = x$split), function(i) { .remove_column(i, c("split", "Component", "Effects")) }) } for (i in names(out)) { header <- switch(i, "conditional" = , "fixed_conditional" = , "fixed" = "# Fixed Effects (Conditional Model)", "fixed_sigma" = "# Sigma (fixed effects)", "sigma" = "# Sigma (fixed effects)", "zi" = , "zero_inflated" = , "fixed_zero_inflated" = , "fixed_zi" = "# Fixed Effects (Zero-Inflated Model)", "random" = , "random_conditional" = "# Random Effects (Conditional Model)", "random_zero_inflated" = , "random_zi" = "# Random Effects (Zero-Inflated Model)", 
"smooth_sd" = , "fixed_smooth_sd" = "# Smooth Terms", # blavaan "latent" = "# Latent Loading", "residual" = "# Residual Variance", "intercept" = "# Intercept", "regression" = "# Regression", # Default paste0("# ", i) ) if ("Parameter" %in% colnames(out[[i]])) { # clean parameters names out[[i]]$Parameter <- gsub("(b_|bs_|bsp_|bcs_)(?!zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) out[[i]]$Parameter <- gsub("(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) # clean random effect parameters names out[[i]]$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[\\(Intercept\\) (.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", out[[i]]$Parameter) # clean smooth terms out[[i]]$Parameter <- gsub("^smooth_sd\\[(.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("^sds_", "\\1", out[[i]]$Parameter) # SD out[[i]]$Parameter <- gsub("(.*)(__Intercept|__zi_Intercept)(.*)", "\\1 (Intercept)\\3", gsub("^sd_(.*)", "SD \\1", out[[i]]$Parameter)) # remove ".1" etc. suffix out[[i]]$Parameter <- gsub("(.*)(\\.)(\\d)$", "\\1 \\3", out[[i]]$Parameter) # remove "__zi" out[[i]]$Parameter <- gsub("__zi", "", out[[i]]$Parameter) } if (length(out) > 1) { insight::print_color(header, "blue") cat("\n\n") } cat(insight::export_table(out[[i]], digits = digits)) cat("\n") } } bayestestR/R/format.R0000644000175000017500000002364014076521541014357 0ustar nileshnilesh#' @export format.describe_posterior <- function(x, cp, digits = 2, format = "text", ci_string = "CI", caption = NULL, subtitles = NULL, ...) { # reshape CI if (is.data.frame(x) && .n_unique(x$CI) > 1) { att <- attributes(x) x <- reshape_ci(x) attributes(x) <- utils::modifyList(att, attributes(x)) } # format columns and values of data frame out <- insight::format_table(x, digits = digits, format = format, ...) # different CI-types as column names? if (ci_string != "CI" && any(grepl("CI$", colnames(out)))) { colnames(out) <- gsub("(.*)CI$", paste0("\\1", ci_string), colnames(out)) } # match and split at components if (!is.null(cp) && !all(is.na(match(cp$Parameter, out$Parameter)))) { out <- insight::print_parameters( cp, out, keep_parameter_column = FALSE, remove_empty_column = TRUE, titles = caption, subtitles = subtitles, format = format ) } else { attr(out, "table_caption") <- caption attr(out, "table_subtitle") <- subtitles } out } #' @export format.point_estimate <- format.describe_posterior #' @export format.p_rope <- format.describe_posterior #' @export format.p_direction <- format.describe_posterior #' @export format.p_map <- format.describe_posterior #' @export format.map_estimate <- format.describe_posterior #' @export format.p_significance <- format.describe_posterior #' @export format.bayestestR_hdi <- format.describe_posterior #' @export format.bayestestR_eti <- format.describe_posterior #' @export format.bayestestR_si <- format.describe_posterior # special handling for bayes factors ------------------ #' @export format.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, format = "text", caption = NULL, exact = TRUE, ...) { BFE <- x denominator <- attr(BFE, "denominator") grid.type <- attr(BFE, "BF_method") model_names <- attr(BFE, "model_names") formula_length <- attr(BFE, "text_length") BFE <- as.data.frame(BFE) if (!log) { BFE$log_BF <- exp(BFE$log_BF) } BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) 
if (any((sgn <- sign(BFE$log_BF) < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE$Model[BFE$Model == "1"] <- "(Intercept only)" # indicate null-model # shorten model formulas? if (!is.null(formula_length) && !is.null(BFE$Model)) { BFE$Model <- insight::format_string(BFE$Model, length = formula_length) } if (isFALSE(show_names) || is.null(model_names) || length(model_names) != nrow(BFE)) { BFE$i <- paste0("[", seq_len(nrow(BFE)), "]") } else { BFE$i <- paste0("[", model_names, "]") } # Denominator denM <- .trim(paste0(BFE$i, " ", BFE$Model)[denominator]) BFE <- BFE[-denominator, ] BFE <- BFE[c("i", "Model", "BF")] colnames(BFE)[1] <- ifelse(identical(format, "html"), "Name", "") # footer if (is.null(format) || format == "text") { footer <- list( "\n* Against Denominator: ", c(denM, "cyan"), "\n* Bayes Factor Type: ", c(grid.type, "cyan"), if (log) c("\n\nBayes Factors are on the log-scale.", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- .compact_list(list( paste0("Against Denominator: ", denM), paste0("Bayes Factor Type: ", grid.type), if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, format = "text", caption = NULL, exact = TRUE, ...) { priorOdds <- attr(x, "priorOdds") matched <- attr(x, "matched") # format table BFE <- as.data.frame(x) if (!log) { BFE$log_BF <- exp(BFE$log_BF) } BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) if (any((sgn <- sign(BFE$log_BF) < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE <- BFE[c("p_prior", "p_posterior", "BF")] BFE <- cbind(rownames(BFE), BFE) colnames(BFE) <- c("", "P(prior)", "P(posterior)", "Inclusion BF") colnames(BFE)[1] <- ifelse(identical(format, "html"), "Parameter", "") # footer if (is.null(format) || format == "text") { footer <- list( c("\n* Compared among: "), c(if (matched) "matched models only" else "all models", "cyan"), c("\n* Priors odds: "), c(if (!is.null(priorOdds)) "custom" else "uniform-equal", "cyan"), if (log) c("\n\nBayes Factors are on the log-scale.", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- .compact_list(list( paste0("Compared among: ", if (matched) "matched models only" else "all models"), paste0("Priors odds: ", if (!is.null(priorOdds)) "custom" else "uniform-equal"), if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, format = "text", caption = NULL, exact = TRUE, ...) { BFE <- as.data.frame(x) # Format if (!log) { BFE$log_BF <- exp(BFE$log_BF) } BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) if (any((sgn <- sign(BFE$log_BF) < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE$log_BF <- NULL colnames(BFE) <- c("Hypothesis", "P(Prior)", "P(Posterior)", "BF") # footer if (is.null(format) || format == "text") { footer <- list( c("\n* Bayes factors for the restricted model vs. 
the un-restricted model.\n"), if (log) c("\nBayes Factors are on the log-scale.\n", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- .compact_list(list( "Bayes factors for the restricted model vs. the un-restricted model.", if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_parameters <- function(x, cp = NULL, digits = 3, log = FALSE, format = "text", exact = TRUE, ...) { null <- attr(x, "hypothesis") direction <- attr(x, "direction") if (!log) { x$log_BF <- exp(x$log_BF) } x$BF_override <- insight::format_bf(abs(x$log_BF), name = NULL, exact = exact, ...) if (any((sgn <- sign(x$log_BF) < 0)[!is.na(x$log_BF)])) { x$BF_override[sgn] <- paste0("-", x$BF_override[sgn]) } x$log_BF <- NULL # format columns and values of data frame out <- insight::format_table(x, digits = digits, format = format, ...) colnames(out)[colnames(out) == "BF_override"] <- "BF" # table caption caption <- sprintf( "Bayes Factor (%s)", if (length(null) == 1) "Savage-Dickey density ratio" else "Null-Interval" ) if (is.null(format) || format == "text") { caption <- c(caption, "blue") } # format null-value if (length(null) == 1) { null <- insight::format_value(null, digits = digits, protect_integers = TRUE) } else { null <- insight::format_ci(null[1], null[2], ci = NULL, digits = digits) } # footer if (is.null(format) || format == "text") { footer <- list( c("\n* Evidence Against The Null: "), c(null, "cyan"), if (direction) c("\n* Direction: "), if (direction < 0) c("Left-Sided test", "cyan"), if (direction > 0) c("Right-Sided test", "cyan"), if (log) c("\n\nBayes Factors are on the log-scale.\n", "red") ) } else { footer <- .compact_list(list( paste0("Evidence Against The Null: ", null), if (direction) c("Direction: "), if (direction < 0) "Left-Sided test", if (direction > 0) "Right-Sided test", if (log) "Bayes Factors are on the log-scale." )) } # match and split at components if (!is.null(cp) && !all(is.na(match(cp$Parameter, out$Parameter)))) { out <- insight::print_parameters( cp, out, keep_parameter_column = FALSE, remove_empty_column = TRUE, format = format ) attr(out[[1]], "table_caption") <- caption attr(out[[length(out)]], "table_footer") <- footer } else { attr(out, "table_caption") <- caption attr(out, "table_footer") <- footer } out } bayestestR/R/reshape_iterations.R0000644000175000017500000000474514111634401016752 0ustar nileshnilesh#' Reshape estimations with multiple iterations (draws) to long format #' #' Reshape a wide data.frame of iterations (such as posterior draws or bootstrapped samples) as columns to long format. Instead of having all iterations as columns (e.g., `iter_1, iter_2, ...`), will return 3 columns with the `\*_index` (the previous index of the row), the `\*_group` (the iteration number) and the `\*_value` (the value of said iteration). #' #' @param x A data.frame containing posterior draws obtained from `estimate_response` or `estimate_link`. #' @param prefix The prefix of the draws (for instance, `"iter_"` for columns named as `iter_1, iter_2, iter_3`). If more than one is provided, will search for the first one that matches. #' @examples #' \donttest{ #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ am, data = mtcars, refresh = 0) #' draws <- insight::get_predicted(model) #' long_format <- reshape_iterations(draws) #' head(long_format) #' } #' } #' @return Data frame of reshaped draws in long format.
#' @export reshape_iterations <- function(x, prefix = c("draw", "iter", "iteration", "sim")) { # Accommodate output from get_predicted if (inherits(x, "get_predicted") && "iterations" %in% names(attributes(x))) { x <- as.data.frame(x) } # Find columns' name prefix <- prefix[min(which(sapply(tolower(prefix), function(prefix) sum(grepl(prefix, tolower(names(x)))) > 1)))] if (is.na(prefix) || is.null(prefix)) { stop("Couldn't find columns corresponding to iterations in your data frame, please specify the correct prefix.") } # Get column names iter_cols <- tolower(names(x))[grepl(prefix, tolower(names(x)))] # Drop "_" if prefix ends with it newname <- ifelse(endsWith(prefix, "_"), substr(prefix, 1, nchar(prefix) - 1), prefix) # Create Index column index_col <- paste0(newname, "_index") if (index_col %in% names(x)) index_col <- paste0(".", newname, "_index") x[[index_col]] <- seq_len(nrow(x)) # Reshape long <- stats::reshape(x, varying = iter_cols, idvar = index_col, v.names = paste0(newname, "_value"), timevar = paste0(newname, "_group"), direction = "long" ) row.names(long) <- NULL class(long) <- class(long)[which(class(long) == "data.frame"):length(class(long))] long } #' @rdname reshape_iterations #' @export reshape_draws <- function(x, prefix = c("draw", "iter", "iteration", "sim")) { .Deprecated("reshape_iterations") reshape_iterations(x, prefix) } bayestestR/R/hdi.R0000644000175000017500000003235514111634401013624 0ustar nileshnilesh#' Highest Density Interval (HDI) #' #' Compute the **Highest Density Interval (HDI)** of posterior distributions. #' All points within this interval have a higher probability density than points #' outside the interval. The HDI can be used in the context of uncertainty #' characterisation of posterior distributions as **Credible Interval (CI)**. #' #' @param x Vector representing a posterior distribution, or a data frame of such #' vectors. Can also be a Bayesian model (`stanreg`, `brmsfit`, #' `MCMCglmm`, `mcmc` or `bcplm`) or a `BayesFactor` model. #' @param ci Value or vector of probability of the (credible) interval - CI #' (between 0 and 1) to be estimated. Defaults to `.95` (`95%`). #' @param effects Should results for fixed effects, random effects or both be #' returned? Only applies to mixed models. May be abbreviated. #' @param component Should results for all parameters, parameters for the #' conditional model or the zero-inflated part of the model be returned? May #' be abbreviated. Only applies to \pkg{brms}-models. #' @param parameters Regular expression pattern that describes the parameters #' that should be returned. Meta-parameters (like `lp__` or `prior_`) are #' filtered by default, so only parameters that typically appear in the #' `summary()` are returned. Use `parameters` to select specific parameters #' for the output. #' @param verbose Toggle off warnings. #' @param ... Currently not used. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details Unlike equal-tailed intervals (see `eti()`) that typically exclude `2.5%` #' from each tail of the distribution and always include the median, the HDI is #' *not* equal-tailed and therefore always includes the mode(s) of posterior #' distributions. 
#' \cr \cr #' The [`95%` or `89%` Credible Intervals (CI)](https://easystats.github.io/bayestestR/articles/credible_interval.html) #' are two reasonable ranges to characterize the uncertainty related to the estimation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html) for a discussion about the differences between these two values). #' \cr #' The `89%` intervals (`ci = 0.89`) are deemed to be more stable than, for #' instance, `95%` intervals (\cite{Kruschke, 2014}). An effective sample size #' of at least 10,000 is recommended if one wants to estimate `95%` intervals #' with high precision (\cite{Kruschke, 2014, p. 183ff}). Unfortunately, the #' default number of posterior samples for most Bayes packages (e.g., `rstanarm` #' or `brms`) is only 4,000 (thus, you might want to increase it when fitting #' your model). Moreover, 89 indicates the arbitrariness of interval limits - #' its only remarkable property is being the highest prime number that does not #' exceed the already unstable `95%` threshold (\cite{McElreath, 2015}). #' \cr #' However, `95%` has some [advantages #' too](https://easystats.github.io/blog/posts/bayestestr_95/). For instance, it #' shares (in the case of a normal posterior distribution) an intuitive #' relationship with the standard deviation and it conveys a more accurate image #' of the (artificial) bounds of the distribution. Also, because it is wider, it #' makes analyses more conservative (i.e., the probability of covering 0 is #' larger for the `95%` CI than for lower ranges such as `89%`), which is a good #' thing in the context of the reproducibility crisis. #' \cr \cr #' A `95%` equal-tailed interval (ETI) has `2.5%` of the distribution on either #' side of its limits. It indicates the 2.5th percentile and the 97.5th #' percentile. In symmetric distributions, the two methods of computing credible #' intervals, the ETI and the [HDI][hdi], return similar results. #' \cr \cr #' This is not the case for skewed distributions. Indeed, it is possible that #' parameter values in the ETI have lower credibility (are less probable) than #' parameter values outside the ETI. This property seems undesirable as a summary #' of the credible values in a distribution. #' \cr \cr #' On the other hand, the ETI range does change when transformations are applied #' to the distribution (for instance, for a log odds scale to probabilities): #' the lower and higher bounds of the transformed distribution will correspond #' to the transformed lower and higher bounds of the original distribution. #' On the contrary, applying transformations to the distribution will change #' the resulting HDI. #' #' @inherit ci return #' #' @family ci #' @seealso Other interval functions, such as [hdi()], [eti()], [bci()], [si()], [cwi()]. 
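#' @details As a rough sketch of the ETI/HDI contrast described above (a skewed
#'   distribution is used on purpose; exact values will vary with the random draws):
#'
#' ```r
#' x <- rgamma(10000, shape = 2) # a right-skewed "posterior"
#' hdi(x, ci = 0.89) # shifted towards the high-density bulk of the distribution
#' eti(x, ci = 0.89) # equal-tailed: cuts 5.5% from each tail
#' ```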
#' #' @examples #' library(bayestestR) #' #' posterior <- rnorm(1000) #' hdi(posterior, ci = .89) #' hdi(posterior, ci = c(.80, .90, .95)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' hdi(df) #' hdi(df, ci = c(.80, .90, .95)) #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' hdi(model) #' hdi(model, ci = c(.80, .90, .95)) #' #' library(emmeans) #' hdi(emtrends(model, ~1, "wt")) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' hdi(model) #' hdi(model, ci = c(.80, .90, .95)) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' hdi(bf) #' hdi(bf, ci = c(.80, .90, .95)) #' } #' @author Credits go to [ggdistribute](https://rdrr.io/cran/ggdistribute/src/R/stats.R) and [HDInterval](https://github.com/mikemeredith/HDInterval). #' #' @references \itemize{ #' \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' \item McElreath, R. (2015). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman and Hall/CRC. #' } #' #' @export hdi <- function(x, ...) { UseMethod("hdi") } #' @rdname hdi #' @export hdi.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .hdi(x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @rdname hdi #' @export hdi.data.frame <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "hdi") attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @rdname hdi #' @export hdi.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi") attr(dat, "data") <- deparse(substitute(x), width.cutoff = 500) dat } #' @export hdi.bamlss <- function(x, ci = 0.95, component = c("all", "conditional", "location"), verbose = TRUE, ...) { component <- match.arg(component) d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi") dat <- .add_clean_parameters_attribute(dat, x) attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export hdi.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export hdi.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi") attr(dat, "data") <- .safe_deparse(substitute(x)) dat } #' @export hdi.bayesQR <- hdi.bcplm #' @export hdi.blrm <- hdi.bcplm #' @export hdi.mcmc.list <- hdi.bcplm #' @export hdi.BGGM <- hdi.bcplm #' @rdname hdi #' @export hdi.sim.merMod <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) dat <- .compute_interval_simMerMod( x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "hdi" ) out <- dat$result attr(out, "data") <- dat$data out } #' @rdname hdi #' @export hdi.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) 
{ dat <- .compute_interval_sim( x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "hdi" ) out <- dat$result attr(out, "data") <- dat$data out } #' @rdname hdi #' @export hdi.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- hdi(xdf, ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export hdi.emm_list <- hdi.emmGrid #' @rdname hdi #' @export hdi.stanreg <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( hdi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out))) out } #' @export hdi.stanfit <- hdi.stanreg #' @export hdi.blavaan <- hdi.stanreg #' @rdname hdi #' @export hdi.brmsfit <- function(x, ci = 0.95, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( hdi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out))) out } #' @rdname hdi #' @export hdi.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- hdi(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export hdi.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { out <- hdi(as.data.frame(t(attributes(x)$iterations)), ...) 
} else { stop("No iterations present in the output.") } attr(out, "object_name") <- .safe_deparse(substitute(x)) out } # Helper ------------------------------------------------------------------ #' @keywords internal .hdi <- function(x, ci = 0.95, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } x_sorted <- unname(sort.int(x, method = "quick")) # removes NA/NaN, but not Inf window_size <- ceiling(ci * length(x_sorted)) # See https://github.com/easystats/bayestestR/issues/39 if (window_size < 2) { if (verbose) { warning("`ci` is too small or x does not contain enough data points, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } nCIs <- length(x_sorted) - window_size if (nCIs < 1) { if (verbose) { warning("`ci` is too large or x does not contain enough data points, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } ci.width <- sapply(1:nCIs, function(.x) x_sorted[.x + window_size] - x_sorted[.x]) # find minimum of width differences, check for multiple minima min_i <- which(ci.width == min(ci.width)) n_candies <- length(min_i) if (n_candies > 1) { if (any(diff(sort(min_i)) != 1)) { if (verbose) { warning("Identical densities found along different segments of the distribution, choosing rightmost.", call. = FALSE) } min_i <- max(min_i) } else { min_i <- floor(mean(min_i)) } } data.frame( "CI" = ci, "CI_low" = x_sorted[min_i], "CI_high" = x_sorted[min_i + window_size] ) } bayestestR/R/print.bayesfactor_models.R0000644000175000017500000000464314076521542020072 0ustar nileshnilesh # print.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, ...) { # BFE <- x # denominator <- attr(BFE, "denominator") # grid.type <- attr(BFE, "BF_method") # model_names <- rownames(BFE) # # BFE <- as.data.frame(BFE) # if (log) { # BFE$BF <- log(BFE$BF) # } # BFE$BF <- insight::format_value(BFE$BF, digits = digits, missing = "NA", zap_small = log) # BFE$Model[BFE$Model == "1"] <- "(Intercept only)" # indicate null-model # # if ((!show_names) || is.null(model_names) || length(model_names) != nrow(BFE)) { # BFE$i <- paste0("[", seq_len(nrow(BFE)), "]") # } else { # BFE$i <- paste0("[", model_names, "]") # } # # # Denominator # denM <- .trim(paste0(BFE$i, " ", BFE$Model)[denominator]) # BFE <- BFE[-denominator, ] # BFE <- BFE[c("i", "Model", "BF")] # colnames(BFE)[1] <- "" # # # footer # footer <- list( # "\n* Against Denominator: ", # c(denM, "cyan"), # "\n* Bayes Factor Type: ", # c(grid.type, "cyan"), # if (log) c("\n\nBayes Factors are on the log-scale.", "red") # ) # # cat(insight::export_table( # BFE, # sep = " ", header = NULL, align = c("llr"), # caption = c("# Bayes Factors for Model Comparison", "blue"), # footer = footer # )) # # invisible(x) # } #' @export print.bayesfactor_models_matrix <- function(x, digits = 2, log = FALSE, exact = TRUE, ...) { orig_x <- x # Format values x <- unclass(x) if (!log) x <- exp(x) sgn <- sign(x) < 0 x <- insight::format_bf(abs(x), name = NULL, exact = exact, ...) 
diag(x) <- if (log) "0" else "1" if (any(sgn)) x[sgn] <- paste0("-", x[sgn]) df <- as.data.frame(x) # Model names models <- colnames(df) models[models == "1"] <- "(Intercept only)" models <- paste0("[", seq_along(models), "] ", models) k <- max(sapply(c(models, "Denominator"), nchar)) + 2 rownames(df) <- colnames(df) <- NULL df <- cbind(Model = models, df) colnames(df) <- c("placeholder", paste0(" [", seq_along(models), "] ")) out <- insight::export_table( df, caption = c("# Bayes Factors for Model Comparison", "blue"), subtitle = c(sprintf("\n\n%sNumerator\nDenominator", paste(rep(" ", k), collapse = "")), "cyan"), footer = if (log) c("\nBayes Factors are on the log-scale.\n", "red") ) out <- sub("placeholder", "\b\b", out) cat(out) invisible(orig_x) } bayestestR/R/area_under_curve.R0000644000175000017500000000371514101116425016367 0ustar nileshnilesh#' Area under the Curve (AUC) #' #' Based on the DescTools `AUC` function. It can calculate the area under the curve with a naive algorithm or a more elaborate spline approach. The curve must be given by vectors of xy-coordinates. This function can handle unsorted x values (by sorting x) and ties for the x values (by ignoring duplicates). #' #' @param x Vector of x values. #' @param y Vector of y values. #' @param method Method to compute the Area Under the Curve (AUC). Can be `"trapezoid"` (default), `"step"` or `"spline"`. If "trapezoid", the curve is formed by connecting all points by a direct line (composite trapezoid rule). If "step" is chosen then a stepwise connection of two points is used. For calculating the area under a spline interpolation, the `splinefun()` function is used in combination with `integrate()`. #' @param ... Arguments passed to or from other methods. #' #' #' @examples #' library(bayestestR) #' posterior <- distribution_normal(1000) #' #' dens <- estimate_density(posterior) #' dens <- dens[dens$x > 0, ] #' x <- dens$x #' y <- dens$y #' #' area_under_curve(x, y, method = "trapezoid") #' area_under_curve(x, y, method = "step") #' area_under_curve(x, y, method = "spline") #' @seealso DescTools #' @export area_under_curve <- function(x, y, method = c("trapezoid", "step", "spline"), ...) { # Stolen from DescTools: https://github.com/cran/DescTools/blob/master/R/StatsAndCIs.r if (length(x) != length(y)) { stop("length x must equal length y") } idx <- order(x) x <- x[idx] y <- y[idx] switch(match.arg(arg = method, choices = c("trapezoid", "step", "spline")), "trapezoid" = sum((rowMeans(cbind(y[-length(y)], y[-1]))) * (x[-1] - x[-length(x)])), "step" = sum(y[-length(y)] * (x[-1] - x[-length(x)])), "spline" = stats::integrate(stats::splinefun(x, y, method = "natural"), lower = min(x), upper = max(x))$value ) } #' @rdname area_under_curve #' @export auc <- area_under_curve bayestestR/R/effective_sample.R0000644000175000017500000001100214114077153016355 0ustar nileshnilesh#' Effective Sample Size (ESS) #' #' This function returns the effective sample size (ESS). #' #' @param model A `stanreg`, `stanfit`, or `brmsfit` object. #' @param ... Currently not used. #' @inheritParams hdi #' #' @return A data frame with two columns: Parameter name and effective sample size (ESS). #' #' @details **Effective Sample Size (ESS)** should be as large as possible, although for most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. 
It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (*Kruschke 2015, p. 182-3*). #' #' @references \itemize{ #' \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' \item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28. #' } #' #' @examples #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' effective_sample(model) #' } #' @export effective_sample <- function(model, ...) { UseMethod("effective_sample") } #' @rdname effective_sample #' @export effective_sample.brmsfit <- function(model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) pars <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) insight::check_if_installed("rstan") s <- rstan::summary(model$fit)$summary s <- subset(s, subset = rownames(s) %in% colnames(pars)) data.frame( Parameter = rownames(s), ESS = round(s[, "n_eff"]), stringsAsFactors = FALSE, row.names = NULL ) } #' @rdname effective_sample #' @export effective_sample.stanreg <- function(model, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) pars <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) s <- as.data.frame(summary(model)) s <- s[rownames(s) %in% colnames(pars), ] data.frame( Parameter = rownames(s), ESS = s[["n_eff"]], stringsAsFactors = FALSE, row.names = NULL ) } #' @export effective_sample.stanfit <- function(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) pars <- insight::get_parameters( model, effects = effects, parameters = parameters ) insight::check_if_installed("rstan") s <- as.data.frame(rstan::summary(model)$summary) s <- s[rownames(s) %in% colnames(pars), ] data.frame( Parameter = rownames(s), ESS = s[["n_eff"]], stringsAsFactors = FALSE, row.names = NULL ) } #' @rdname effective_sample #' @export effective_sample.blavaan <- function(model, parameters = NULL, ...) { insight::check_if_installed("blavaan") ESS <- blavaan::blavInspect(model, what = "neff") data.frame( Parameter = colnames(insight::get_parameters(model)), ESS = ESS, stringsAsFactors = FALSE, row.names = NULL ) } #' @rdname effective_sample #' @export effective_sample.MCMCglmm <- function(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
{ # check arguments effects <- match.arg(effects) pars <- insight::get_parameters( model, effects = effects, parameters = parameters, summary = TRUE ) s.fixed <- as.data.frame(summary(model)$solutions) s.random <- as.data.frame(summary(model)$Gcovariances) es <- data.frame( Parameter = rownames(s.fixed), ESS = round(s.fixed[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL ) if (nrow(s.random) > 0) { es <- rbind(es, data.frame( Parameter = rownames(s.random), ESS = round(s.random[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL )) } es[match(pars[[1]], es$Parameter), ] } bayestestR/R/point_estimate.R0000644000175000017500000002443714101116425016106 0ustar nileshnilesh#' Point-estimates of posterior distributions #' #' Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. #' #' @param centrality The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: `"median"`, `"mean"`, `"MAP"` or `"all"`. #' @param dispersion Logical, if `TRUE`, computes indices of dispersion related to the estimate(s) (`SD` and `MAD` for `mean` and `median`, respectively). #' @param threshold For `centrality = "trimmed"` (i.e. trimmed mean), indicates the fraction (0 to 0.5) of observations to be trimmed from each end of the vector before the mean is computed. #' @param ... Additional arguments to be passed to or from methods. #' @inheritParams hdi #' #' @references [Vignette In-Depth 1: Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(bayestestR) #' #' point_estimate(rnorm(1000)) #' point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) #' point_estimate(rnorm(1000), centrality = c("median", "MAP")) #' #' df <- data.frame(replicate(4, rnorm(100))) #' point_estimate(df, centrality = "all", dispersion = TRUE) #' point_estimate(df, centrality = c("median", "MAP")) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' #' # emmeans estimates #' # ----------------------------------------------- #' library(emmeans) #' point_estimate(emtrends(model, ~1, "wt"), centrality = c("median", "MAP")) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' # BayesFactor objects #' # ----------------------------------------------- #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' point_estimate(bf, centrality = "all", dispersion = TRUE) #' point_estimate(bf, centrality = c("median", "MAP")) #' } #' #' @export point_estimate <- function(x, centrality = "all", dispersion = FALSE, ...) { UseMethod("point_estimate") } #' @rdname point_estimate #' @export point_estimate.numeric <- function(x, centrality = "all", dispersion = FALSE, threshold = .1, ...) 
{ centrality <- match.arg(tolower(centrality), c("median", "mean", "map", "trimmed", "all"), several.ok = TRUE) if ("all" %in% centrality) { estimate_list <- c("median", "mean", "map") } else { estimate_list <- centrality } out <- data.frame(".temp" = 0) # Median if ("median" %in% estimate_list) { out$Median <- stats::median(x) if (dispersion) { out$MAD <- stats::mad(x) } } # Mean if ("mean" %in% estimate_list) { out$Mean <- mean(x) if (dispersion) { out$SD <- stats::sd(x) } } # trimmed mean if ("trimmed" %in% estimate_list) { out$Trimmed_Mean <- mean(x, trim = threshold) if (dispersion) { out$SD <- stats::sd(x) } } # MAP if ("map" %in% estimate_list) { out$MAP <- as.numeric(map_estimate(x)) } out <- out[names(out) != ".temp"] attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.data.frame <- function(x, centrality = "all", dispersion = FALSE, threshold = .1, ...) { x <- .select_nums(x) if (ncol(x) == 1) { estimates <- point_estimate(x[, 1], centrality = centrality, dispersion = dispersion, threshold = threshold, ...) } else { estimates <- sapply(x, point_estimate, centrality = centrality, dispersion = dispersion, simplify = FALSE, ...) estimates <- do.call(rbind, estimates) } out <- cbind(data.frame("Parameter" = names(x), stringsAsFactors = FALSE), estimates) rownames(out) <- NULL attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.mcmc <- function(x, centrality = "all", dispersion = FALSE, ...) { point_estimate(as.data.frame(x), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.bcplm <- function(x, centrality = "all", dispersion = FALSE, ...) { point_estimate(insight::get_parameters(x), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.bayesQR <- point_estimate.bcplm #' @export point_estimate.blrm <- point_estimate.bcplm #' @export point_estimate.mcmc.list <- point_estimate.bcplm #' @export point_estimate.BGGM <- point_estimate.bcplm #' @export point_estimate.bamlss <- function(x, centrality = "all", dispersion = FALSE, component = c("conditional", "location", "all"), ...) { component <- match.arg(component) out <- point_estimate(insight::get_parameters(x, component = component), centrality = centrality, dispersion = dispersion, ...) out <- .add_clean_parameters_attribute(out, x) out } #' @export point_estimate.MCMCglmm <- function(x, centrality = "all", dispersion = FALSE, ...) { nF <- x$Fixed$nfl point_estimate(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.emmGrid <- function(x, centrality = "all", dispersion = FALSE, ...) { xdf <- insight::get_parameters(x) out <- point_estimate(xdf, centrality = centrality, dispersion = dispersion, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export point_estimate.emm_list <- point_estimate.emmGrid # Helper ------------------------------------------------------------------ #' @keywords internal .point_estimate_models <- function(x, effects, component, parameters, centrality = "all", dispersion = FALSE, ...) { out <- point_estimate(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ...) 
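  # class and model-specific attributes are then added by the calling S3 methods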
out } #' @rdname point_estimate #' @export point_estimate.stanreg <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("location", "all", "conditional", "smooth_terms", "sigma", "distributional", "auxiliary"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( point_estimate(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ...), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.stanfit <- point_estimate.stanreg #' @export point_estimate.blavaan <- point_estimate.stanreg #' @rdname point_estimate #' @export point_estimate.brmsfit <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( point_estimate(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ...), cleaned_parameters ) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim.merMod <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ...) { effects <- match.arg(effects) out <- .point_estimate_models( x = x, effects = effects, component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... ) attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters) attr(out, "centrality") <- centrality out <- .add_clean_parameters_attribute(out, x) class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim <- function(x, centrality = "all", dispersion = FALSE, parameters = NULL, ...) { out <- .point_estimate_models( x = x, effects = "fixed", component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @rdname point_estimate #' @export point_estimate.BFBayesFactor <- function(x, centrality = "all", dispersion = FALSE, ...) { out <- point_estimate(insight::get_parameters(x), centrality = centrality, dispersion = dispersion, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.matrix <- function(x, ...) { point_estimate(as.data.frame(x), ...) } #' @export point_estimate.get_predicted <- function(x, ...) { if ("iterations" %in% names(attributes(x))) { point_estimate(as.data.frame(t(attributes(x)$iterations)), ...) 
} else { as.numeric(x) } } bayestestR/inst/0000755000175000017500000000000014135671001013504 5ustar nileshnileshbayestestR/inst/CITATION0000644000175000017500000000142213704020662014642 0ustar nileshnileshbibentry( bibtype="Article", title="bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework.", author=c(person("Dominique", "Makowski"), person("Mattan S.", "Ben-Shachar"), person("Daniel", "Lüdecke")), journal="Journal of Open Source Software", doi="10.21105/joss.01541", year="2019", number = "40", volume = "4", pages = "1541", url="https://joss.theoj.org/papers/10.21105/joss.01541", textVersion = "Makowski, D., Ben-Shachar, M., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. doi:10.21105/joss.01541", mheader = "To cite bayestestR in publications use:" ) bayestestR/inst/WORDLIST0000644000175000017500000000336514057264527014723 0ustar nileshnileshADE Altough BCa BFs BGGM BICs BMA BMJ Baws BayesFactor Bayesfactor Bergh Bridgesampling CRC DOI DV Dablander DescTools Desimone DiCiccio Driing ESS ETI Efron Elsevier Etz Fernández Funder Gelman Ghosh Grasman Gronau's HDI HDInterval Haaf Hinne Hirose Imai Iverson JASP JASP's Jeffreys Kass Keele Kruschke Kuriyal Ley Liao Liddell Lindley Littman Lodewyckx Ly MCMCglmm MCSE MPE Mathot Mattan Matzke McElreath Midya Modelling Morey Multicollinearity Orthonormal Ozer Parmigiani Piironen Posteriori Psychonomic ROPE's ROPEs ROPE’s Raftery Retrived Rhat Rouder SEM SEXIT SHA SavageDickey Shachar Speckman Tada Tingley Un Vandekerckhove Vehtari Versicolor Visualise Wagenmakers Wetzels Wickham Wookies Yamamoto al altough analsyes arXiv autocorrelated avaible bayesQR bayesian bcplm behavioural blogpost bootsrapped brms brmsfit centred characterisation characterises ci cogpsych cogsci columbia compte containe contr cplm doi driiiing eXistence easystats edu effectsize emmeans et favour favouring fpsyg frac frequentis frequentist's fullrank generalised gg ggdistribute github grano higer https infty ing interpretability interpretable io iteratively jasp jmp joss lavaan lentiful lm maths mcmc modelling nbinom neq nl notin objets operationlizing orthonormal osterior pre preprint priori ps reconceptualisation replicability reproducibility richarddmorey riors rmsb rstanarm sIgnificance salis setosa setosas splinefun stanfit stanreg statmodeling summarise summarised th treedepth un underbrace versicolor versicolors virginica virgnica visualisation visualise warmup wil www xy bayestestR/inst/doc/0000755000175000017500000000000014135671001014251 5ustar nileshnileshbayestestR/inst/doc/bayes_factors.R0000644000175000017500000004146614135670652017246 0ustar nileshnilesh## ----setup, include=FALSE----------------------------------------------------- library(knitr) options(knitr.kable.NA = "", digits = 2) knitr::opts_chunk$set( echo = TRUE, comment = ">", out.width = "100%", message = FALSE, warning = FALSE, dpi = 150 ) pkgs <- c( "rstanarm", "BayesFactor", "emmeans", "logspline", "lme4", "ggplot2", "see", "insight", "emmeans", "knitr", "effectsize", "bayestestR" ) if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) { knitr::opts_chunk$set(eval = FALSE) } set.seed(4) if (require("ggplot2") && require("see")) { theme_set(theme_modern()) } ## ----deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', 
out.width="80%"---- knitr::include_graphics("https://github.com/easystats/bayestestR/raw/master/man/figures/deathsticks.jpg") ## ----sleep_boxplot, echo=FALSE------------------------------------------------ library(ggplot2) ggplot(sleep, aes(x = group, y = extra, fill = group)) + geom_boxplot() + theme_classic() + theme(legend.position = "none") ## ----rstanarm_model, eval = FALSE--------------------------------------------- # set.seed(123) # library(rstanarm) # # model <- stan_glm( # formula = extra ~ group, # data = sleep, # prior = normal(0, 3, autoscale = FALSE) # ) ## ---- echo=FALSE-------------------------------------------------------------- model <- stan_glm( formula = extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE), refresh = 0 ) ## ---- echo=FALSE-------------------------------------------------------------- null <- c(-1, 1) xrange <- c(-10, 10) x_vals <- seq(xrange[1], xrange[2], length.out = 400) d_vals <- dnorm(x_vals, sd = 3) in_null <- null[1] < x_vals & x_vals < null[2] range_groups <- rep(0, length(x_vals)) range_groups[!in_null & x_vals < 0] <- -1 range_groups[!in_null & x_vals > 0] <- 1 ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + coord_cartesian(ylim = c(0, 0.45)) + theme_modern() + theme(legend.position = c(0.2, 0.8)) pnull <- diff(pnorm(null, sd = 2.5)) prior_odds <- (1 - pnull) / pnull ## ----rstanarm_fit, echo=FALSE------------------------------------------------- library(bayestestR) model_prior <- unupdate(model) posterior <- insight::get_parameters(model)$group2 prior <- insight::get_parameters(model_prior)$group2 f_post <- logspline::logspline(posterior) d_vals_post <- logspline::dlogspline(x_vals, f_post) ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + coord_cartesian(ylim = c(0, 0.45)) + theme_modern() + theme(legend.position = c(0.2, 0.8)) My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1), prior = model_prior) BF <- My_first_BF$BF[2] post_odds <- prior_odds * BF med_post <- point_estimate(posterior)$Median ## ---- eval=FALSE-------------------------------------------------------------- # My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1)) # My_first_BF ## ---- echo=FALSE-------------------------------------------------------------- print(My_first_BF) ## ----------------------------------------------------------------------------- library(see) plot(My_first_BF) ## ----------------------------------------------------------------------------- effectsize::interpret_bf(exp(My_first_BF$log_BF[2]), include_value = TRUE) ## ---- eval=FALSE-------------------------------------------------------------- # My_second_BF <- bayesfactor_parameters(model, null = 0) # My_second_BF ## ---- echo=FALSE-------------------------------------------------------------- My_second_BF <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0 ) print(My_second_BF) ## ----------------------------------------------------------------------------- plot(My_second_BF) ## ----savagedickey_one_sided, eval=FALSE--------------------------------------- # test_group2_right <- bayesfactor_parameters(model, direction = ">") # test_group2_right ## 
----prior_n_post_plot_one_sided, echo=FALSE---------------------------------- test_group2_right <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0, direction = ">" ) BF <- test_group2_right$BF print(test_group2_right) ## ----------------------------------------------------------------------------- plot(test_group2_right) ## ----inteval_div, eval=FALSE-------------------------------------------------- # test_group2_dividing <- bayesfactor_parameters(model, null = c(-Inf, 0)) # test_group2_dividing ## ----inteval_div2, echo=FALSE------------------------------------------------- test_group2_dividing <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = c(-Inf, 0) ) print(test_group2_dividing) ## ----------------------------------------------------------------------------- plot(test_group2_dividing) ## ----------------------------------------------------------------------------- my_first_si <- si( posterior = data.frame(group2 = posterior), prior = data.frame(group2 = prior), BF = 1 ) print(my_first_si) ## ----------------------------------------------------------------------------- plot(my_first_si) ## ----brms_disp, eval = FALSE-------------------------------------------------- # library(brms) # # # intercept only model # m0 <- brm(Sepal.Length ~ 1, data = iris, # prior = # set_prior("student_t(3, 6, 6)", class = "Intercept") + # set_prior("student_t(3, 0, 6)", class = "sigma"), # save_pars = save_pars(all = TRUE), backend = "rstan") # # # Petal.Length only # m1 <- brm(Sepal.Length ~ Petal.Length, data = iris, # prior = # set_prior("student_t(3, 6, 6)", class = "Intercept") + # set_prior("student_t(3, 0, 6)", class = "sigma") + # set_prior("normal(0, 1)", coef = "Petal.Length"), # save_pars = save_pars(all = TRUE)) # # # Species only # m2 <- brm(Sepal.Length ~ Species, data = iris, # prior = # set_prior("student_t(3, 6, 6)", class = "Intercept") + # set_prior("student_t(3, 0, 6)", class = "sigma") + # set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")), # save_pars = save_pars(all = TRUE)) # # # Species + Petal.Length model # m3 <- brm(Sepal.Length ~ Species + Petal.Length, data = iris, # prior = # set_prior("student_t(3, 6, 6)", class = "Intercept") + # set_prior("student_t(3, 0, 6)", class = "sigma") + # set_prior("normal(0, 1)", coef = "Petal.Length") + # set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")), # save_pars = save_pars(all = TRUE)) # # # full interactive model # m4 <- brm(Sepal.Length ~ Species * Petal.Length, data = iris, # prior = # set_prior("student_t(3, 6, 6)", class = "Intercept") + # set_prior("student_t(3, 0, 6)", class = "sigma") + # set_prior("normal(0, 1)", coef = "Petal.Length") + # set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")) + # set_prior("normal(0, 2)", coef = c("Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length")), # save_pars = save_pars(all = TRUE)) ## ----brms_models_disp, eval = FALSE------------------------------------------- # library(bayestestR) # # comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0) # comparison ## ---- echo = FALSE------------------------------------------------------------ comparison <- structure( list(Model = c("Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1"), log_BF = c(101.556419030653, 64.2903334815192, 122.864721399001, 119.712908243647, 0)), class = c("bayesfactor_models", "see_bayesfactor_models", 
"data.frame"), row.names = c("m1", "m2", "m3", "m4", "m0"), denominator = 5L, BF_method = "marginal likelihoods (bridgesampling)", unsupported_models = FALSE ) print(comparison) ## ----update_models1----------------------------------------------------------- update(comparison, reference = 4) ## ----update_models2----------------------------------------------------------- update(comparison, reference = 2) ## ----------------------------------------------------------------------------- as.matrix(comparison) ## ----lme4_models-------------------------------------------------------------- library(lme4) # define models with increasing complexity m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris) m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) # model comparison bayesfactor_models(m1, m2, m3, m4, denominator = m0) ## ----------------------------------------------------------------------------- iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE), refresh = 0 ) ## ----------------------------------------------------------------------------- botanist_hypotheses <- c( "Petal.Length > 0", "(Speciesversicolor > 0) & (Speciesvirginica > 0)" ) ## ----------------------------------------------------------------------------- model_prior <- unupdate(iris_model) botanist_BFs <- bayesfactor_restricted( posterior = iris_model, prior = model_prior, hypothesis = botanist_hypotheses ) print(botanist_BFs) ## ----plot_iris, echo=FALSE---------------------------------------------------- ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) + geom_point() + scale_color_flat() + theme(legend.position = c(0.2, 0.8)) ## ----inclusion_brms----------------------------------------------------------- bayesfactor_inclusion(comparison) ## ----inclusion_brms2---------------------------------------------------------- bayesfactor_inclusion(comparison, match_models = TRUE) ## ----JASP_all----------------------------------------------------------------- library(BayesFactor) data(ToothGrowth) ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- anovaBF(len ~ dose * supp, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF_ToothGrowth) ## ----JASP_all_fig, echo=FALSE------------------------------------------------- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.jpg") ## ----JASP_matched------------------------------------------------------------- bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE) ## ----JASP_matched_fig, echo=FALSE--------------------------------------------- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.jpg") ## ----JASP_Nuisance------------------------------------------------------------ BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4] / BF_ToothGrowth[2] # OR: # update(bayesfactor_models(BF_ToothGrowth), # subset = c(4, 5), # reference = 3) BF_ToothGrowth_against_dose bayesfactor_inclusion(BF_ToothGrowth_against_dose) ## ----JASP_Nuisance_fig, echo=FALSE-------------------------------------------- 
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.jpg") ## ----------------------------------------------------------------------------- mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10, 10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0 ) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10, 10, 20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0 ) BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE) BF_carb ## ----------------------------------------------------------------------------- hdi(mod_carb, ci = .95) ## ----------------------------------------------------------------------------- BMA_draws <- weighted_posteriors(mod, mod_carb) BMA_hdi <- hdi(BMA_draws, ci = .95) BMA_hdi plot(BMA_hdi) ## ---- echo=FALSE-------------------------------------------------------------- set.seed(1) ## ----------------------------------------------------------------------------- library(emmeans) (group_diff <- emmeans(model, pairwise ~ group)) # pass the original model via prior bayesfactor_parameters(group_diff, prior = model) ## ---- echo=FALSE-------------------------------------------------------------- set.seed(1) ## ---- eval=FALSE-------------------------------------------------------------- # library(modelbased) # # estimate_contrasts(model, test = "bf", bf_prior = model) ## ----------------------------------------------------------------------------- df <- iris contrasts(df$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0 ) (pairs_sum <- pairs(emmeans(fit_sum, ~Species))) ggplot(stack(insight::get_parameters(pairs_sum)), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ## ----------------------------------------------------------------------------- contrasts(df$Species) <- contr.orthonorm ## ---- eval=FALSE-------------------------------------------------------------- # options(contrasts = c("contr.orthonorm", "contr.poly")) ## ----------------------------------------------------------------------------- fit_bayes <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0 ) (pairs_bayes <- pairs(emmeans(fit_bayes, ~Species))) ggplot(stack(insight::get_parameters(pairs_bayes)), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) 
+ labs(x = "prior difference values") + theme(legend.position = "none") ## ----------------------------------------------------------------------------- hyp <- c( # comparing 2 levels "setosa < versicolor", "setosa < virginica", "versicolor < virginica", # comparing 3 (or more) levels "setosa < virginica & virginica < versicolor", "virginica < setosa & setosa < versicolor", "setosa < versicolor & versicolor < virginica" ) ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(df$Species) <- contr.sum # fit_sum <- stan_glm(Sepal.Length ~ Species, # data = df, # prior = normal(0, c(1, 1), autoscale = FALSE), # family = gaussian() # ) # # em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means # # bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ## ---- echo=FALSE-------------------------------------------------------------- contrasts(df$Species)[, ] <- contr.sum(3) fit_sum <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0 ) em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(df$Species) <- contr.orthonorm # fit_bayes <- stan_glm(Sepal.Length ~ Species, # data = df, # prior = normal(0, c(1, 1), autoscale = FALSE), # family = gaussian() # ) # em_bayes <- emmeans(fit_sum, ~Species) # the posterior marginal means # bayesfactor_restricted(em_bayes, fit_sum, hypothesis = hyp) ## ---- echo=FALSE-------------------------------------------------------------- contrasts(df$Species)[, ] <- contr.orthonorm(3) fit_bayes <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0 ) em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) bayestestR/inst/doc/bayestestR.Rmd0000644000175000017500000002507514030221266017051 0ustar nileshnilesh--- title: "Get Started with Bayesian Analysis" output: rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Get Started with Bayesian Analysis} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set(comment = ">") options(knitr.kable.NA = "") options(digits = 2) if (!requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(rstanarm) library(bayestestR) } ``` ## Why use the Bayesian Framework? The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards **open and honest science**. 
Reasons to prefer this approach are: - **reliability** [@etz2016bayesian] - **accuracy** (in noisy data and small samples) [@kruschke2012time] - the possibility of introducing **prior knowledge** into the analysis [@andrews2013prior; @kruschke2012time] - critically, the **intuitive nature of results** and their **straightforward interpretation** [@kruschke2010believe; @wagenmakers2018bayesian] In general, the frequentist approach has been associated with a focus on null hypothesis testing, and the misuse of *p*-values has been shown to critically contribute to the reproducibility crisis in social and psychological sciences [@chambers2014instead; @szucs2016empirical]. There is an emerging consensus that the generalization of the Bayesian approach is *one* way of overcoming these issues [@benjamin2018redefine; @etz2016bayesian]. Once we agree that the Bayesian framework is the right way to go, you might wonder *what* exactly this framework is. **What's all the fuss about?** ## What is the Bayesian Framework? Adopting the Bayesian framework is more of a shift in the paradigm than a change in the methodology. Indeed, all the common statistical procedures (*t*-tests, correlations, ANOVAs, regressions, etc.) can be achieved using the Bayesian framework. The key difference is that in the **frequentist framework** (the "classical" approach to statistics, with *p* and *t* values, as well as some weird *degrees of freedom*), **the effects are fixed** (but unknown) and **data are random**. In other words, it assumes that the unknown parameter has a **unique** value that we are trying to estimate/guess using our sample data. On the other hand, in the **Bayesian framework**, instead of estimating the "true effect", the probability of different effects *given the observed data* is computed, resulting in a **distribution** of possible values for the parameters, called the **posterior distribution**. The uncertainty in Bayesian inference can be summarized, for instance, by the **median** of the distribution, as well as a range of values of the posterior distribution that includes the 95\% most probable values (the 95\% **credible interval**). *Cum grano salis*, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. To illustrate the difference in interpretation, the Bayesian framework allows one to say *"given the observed data, the effect has 95\% probability of falling within this range"*, while the frequentist (less intuitive) alternative would be *"when repeatedly computing confidence intervals from data of this sort, there is a 95\% probability that the effect falls within a given range"*. In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (*the posterior*) of an effect that is compatible with the observed data. Thus, an effect can be described by [characterizing its posterior distribution](https://easystats.github.io/bayestestR/articles/guidelines.html) in relation to its centrality (point-estimates), uncertainty, as well as its existence and significance. In other words, putting the maths behind it aside for a moment, we can say that: - The frequentist approach tries to estimate the **real effect**. For instance, the "real" value of the correlation between *x* and *y*. 
Hence, the frequentist models return a **point-estimate** (i.e., a **single** value and not a distribution) of the "real" correlation (e.g., $r = 0.42$) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a "parent", usually normal distribution). - **The Bayesian framework assumes no such thing**. The data are what they are. Based on the observed data (and a **prior** belief about the result), the Bayesian sampling algorithm (**MCMC** sampling is one example) returns a probability distribution (called **the posterior**) of the effect that is compatible with the observed data. For the correlation between *x* and *y*, it will return a **distribution** that says, for example, "the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74 with certain probabilities". - To characterize statistical significance of our effects, we do not need *p*-values, or any other such indices. We simply *describe* the posterior distribution of the effect. For example, we can report the median, the [89% Credible Interval](https://easystats.github.io/bayestestR/articles/credible_interval.html) or [other indices](https://easystats.github.io/bayestestR/articles/guidelines.html). ```{r echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg") ``` *Note: Although the very purpose of this package is to advocate for the use of Bayesian statistics, please note that there are serious arguments supporting frequentist indices (see for instance [this thread](https://discourse.datamethods.org/t/language-for-communicating-frequentist-results-about-treatment-effects/934/16)). As always, the world is not black and white (p \< .001).* **So... how does it work?** ## A simple example ### `bayestestR` installation You can install `bayestestR` along with the whole [**easystats**](https://github.com/easystats/easystats) suite by running the following: ```{r eval=FALSE, message=FALSE, warning=FALSE} install.packages("remotes") remotes::install_github("easystats/easystats") ``` Let's also install and load the [`rstanarm`](https://mc-stan.org/rstanarm/) package, which allows fitting Bayesian models, as well as [`bayestestR`](https://github.com/easystats/bayestestR), to describe them. ```{r message=FALSE, warning=FALSE, eval=FALSE} install.packages("rstanarm") library(rstanarm) ``` ### Traditional linear regression Let's start by fitting a simple frequentist linear regression (the `lm()` function stands for *linear model*) between two numeric variables, `Sepal.Length` and `Petal.Length` from the famous [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R. ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- lm(Sepal.Length ~ Petal.Length, data = iris) summary(model) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA} model <- lm(Sepal.Length ~ Petal.Length, data = iris) summary(model) ``` This analysis suggests that there is a statistically **significant** (whatever that means) and **positive** (with a coefficient of `0.41`) linear relationship between the two variables. Fitting and interpreting the frequentist models is so easy that it is obvious that people use it instead of the Bayesian framework... right? 
**Not anymore.**

### Bayesian linear regression

```{r message=FALSE, warning=FALSE, eval=FALSE}
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)
posteriors <- describe_posterior(model)

# for a nicer table
print_md(posteriors, digits = 2)
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA}
set.seed(333)
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, refresh = 0)
posteriors <- describe_posterior(model)

# for a nicer table
print_md(posteriors, digits = 2)
```

**That's it!** You just fitted a Bayesian version of the model by simply using the [`stan_glm()`](https://mc-stan.org/rstanarm/reference/stan_glm.html) function instead of `lm()`, and described the posterior distributions of its parameters!

The conclusions we draw, for this example, are very similar. The effect (*the median of the effect's posterior distribution*) is about `0.41`, and it can also be considered *significant* in the Bayesian sense (more on that later).

**So, ready to learn more?** Check out the [**next tutorial**](https://easystats.github.io/bayestestR/articles/example1.html)!

And, if you want even more, you can check out other articles describing all the functionality the package has to offer!

## References

Bayes Factors

This vignette can be referred to by citing the following:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Retrieved from https://doi.org/10.3389/fpsyg.2019.02767


The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about the Bayes factor. In short, one school of thought (e.g., the Amsterdam school, led by E. J. Wagenmakers) advocates its use and emphasizes its qualities as a statistical index, while the other points to its limits and prefers, instead, the precise description of posterior distributions (using CIs, ROPEs, etc.).

The bayestestR package does not take a side in this debate, and offers tools to carry out analysis irrespective of the school you subscribe to. Instead, it strongly supports the notion of an informed choice:

discover the methods, learn about them, understand them, try them, and decide for yourself.

Having said that, here’s an introduction to Bayes factors :)

The Bayes Factor

Bayes Factors (BFs) are indices of relative evidence of one “model” over another.

In their role as a hypothesis testing index, they are to the Bayesian framework what a \(p\)-value is to the classical/frequentist framework. In significance-based testing, \(p\)-values are used to assess how unlikely the observed data are if the null hypothesis were true, while in the Bayesian model selection framework, Bayes factors assess evidence for different models, each model corresponding to a specific hypothesis.

According to Bayes’ theorem, we can update prior probabilities of some model \(M\) (\(P(M)\)) to posterior probabilities (\(P(M|D)\)) after observing some datum \(D\) by accounting for the probability of observing that datum given the model (\(P(D|M)\), also known as the likelihood):

\[ P(M|D) = \frac{P(D|M)\times P(M)}{P(D)} \]

Using this equation, we can compare the probability-odds of two models:

\[ \underbrace{\frac{P(M_1|D)}{P(M_2|D)}}_{\text{Posterior Odds}} = \underbrace{\frac{P(D|M_1)}{P(D|M_2)}}_{\text{Likelihood Ratio}} \times \underbrace{\frac{P(M_1)}{P(M_2)}}_{\text{Prior Odds}} \]

Where the likelihood ratio (the middle term) is the Bayes factor - it is the factor by which some prior odds have been updated after observing the data to posterior odds.
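
As a toy illustration (with made-up numbers): if both models are a priori equally credible, and the observed data are 4 times more probable under \(M_1\) than under \(M_2\), then the prior odds are updated by a factor of 4:

prior_odds <- 0.5 / 0.5 # P(M1) / P(M2) = 1
likelihood_ratio <- 0.8 / 0.2 # P(D|M1) / P(D|M2), i.e., BF12 = 4
posterior_odds <- likelihood_ratio * prior_odds # = 4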

Thus, Bayes factors can be calculated in two ways:

  • As a ratio quantifying the relative probability of the observed data under each of the two models. (In some contexts, these probabilities are also called marginal likelihoods.)

\[ BF_{12}=\frac{P(D|M_1)}{P(D|M_2)} \]

  • As the degree of shift in prior beliefs about the relative credibility of two models (since they can be computed by dividing posterior odds by prior odds).

\[ BF_{12}=\frac{Posterior~Odds_{12}}{Prior~Odds_{12}} \]

Here we provide functions for computing Bayes factors in two different contexts:

  • testing single parameters (coefficients) within a model
  • comparing statistical models themselves

Testing Models’ Parameters with Bayes Factors

A Bayes factor for a single parameter can be used to answer the question:

“Given the observed data, has the null hypothesis of an absence of an effect become more or less credible?”

Bayesian analysis of the Students’ (1908) Sleep data set.

Let’s use the Students’ (1908) Sleep data set (data("sleep")). The data comes from a study in which participants were administered a drug and the researchers assessed the extra hours of sleep that participants slept afterwards. We will try answering the following research question using Bayes factors:

Given the observed data, has the hypothesis that the drug (the effect of group) has no effect on the number of hours of extra sleep (variable extra) become more or less credible?

The boxplot suggests that the second group has a higher number of hours of extra sleep. By how much?

Let’s fit a simple Bayesian linear model, with a prior of \(b_{group} \sim N(0, 3)\) (i.e. the prior follows a Gaussian/normal distribution with \(mean = 0\) and \(SD = 3\)), using the rstanarm package:

set.seed(123)
library(rstanarm)

model <- stan_glm(
  formula = extra ~ group,
  data = sleep,
  prior = normal(0, 3, autoscale = FALSE)
)

Testing against a null-region

One way of operationalizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be practically equivalent to the null (Kruschke, 2010). In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. We can then compute the prior probability of the drug’s effect falling within this null-region, and the prior probability of the drug’s effect falling outside the null-region, to get our prior odds. Say that any effect smaller than one hour of extra sleep is practically equivalent to no effect at all; we would then define our prior odds as:

\[ \frac {P(b_{drug} \in [-1, 1])} {P(b_{drug} \notin [-1, 1])} \]

Given our prior has a normal distribution centered at 0 hours with a scale (an SD) of 3 hours, our priors would look like this:

and the prior odds would be 2.2.
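
As a rough sketch, this prior odds can also be computed analytically from the normal prior (note that the value of 2.2 reported above is based on finite prior samples, so the two values need not match exactly):

p_inside <- pnorm(1, mean = 0, sd = 3) - pnorm(-1, mean = 0, sd = 3)
p_outside <- 1 - p_inside
p_outside / p_inside # prior odds of a non-null vs. a null effect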

By looking at the posterior distribution, we can now compute the posterior probability of the drug’s effect falling within the null-region, and the posterior probability of the drug’s effect falling outside the null-region, to get our posterior odds:

\[ \frac {P(b_{drug} \in [-1,1] | Data)} {P(b_{drug} \notin [-1,1] | Data)} \]

We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are 2, which seems to favor the effect being non-null. But, does this mean the data support the alternative over the null? Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here!

Let’s compute the Bayes factor as the change from the prior odds to the posterior odds: \(BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9\)! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than it does for the drug having some clinically significant effect. Thus, although the center of distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has overall shifted closer to the null interval, making the values in the null interval more probable! (see Non-overlapping Hypotheses in Morey & Rouder, 2011)
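
A minimal sketch of this computation “by hand”, assuming the rstanarm model from above (unupdate() refits the model sampling only from the priors, and insight::get_parameters() extracts the draws):

prior_model <- unupdate(model) # sample from the priors
prior_smp <- insight::get_parameters(prior_model)$group2
posterior_smp <- insight::get_parameters(model)$group2

# odds of falling outside vs. inside the null region
odds_outside <- function(x, null = c(-1, 1)) {
  p_in <- mean(x >= null[1] & x <= null[2])
  (1 - p_in) / p_in
}

odds_outside(posterior_smp) / odds_outside(prior_smp) # ~ BF10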

All of this can be achieved with the function bayesfactor_parameters(), which computes a Bayes factor for each of the model’s parameters:

My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1))
My_first_BF
> Bayes Factor (Null-Interval)
> 
> Parameter   |    BF
> -------------------
> (Intercept) | 0.103
> group2      | 0.883
> 
> * Evidence Against The Null: [-1.000, 1.000]

We can also plot using the see package:

library(see)
plot(My_first_BF)

Note that interpretation guides for Bayes factors can be found in the effectsize package:

effectsize::interpret_bf(exp(My_first_BF$log_BF[2]), include_value = TRUE)
> [1] "anecdotal evidence (BF = 1/1.13) against"
> (Rules: jeffreys1961)

Testing against the point-null (0)

What if we don’t know what region would be practically equivalent to 0?

Or if we just want the null to be exactly zero? Not a problem - as the width of the null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.1 This ratio is called the Savage-Dickey ratio, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null:

“[…] the Bayes factor for \(H_0\) versus \(H_1\) could be obtained by analytically integrating out the model parameter \(\theta\). However, the Bayes factor may likewise be obtained by only considering \(H_1\), and dividing the height of the posterior for \(\theta\) by the height of the prior for \(\theta\), at the point of interest.” (Wagenmakers, Lodewyckx, Kuriyal, & Grasman, 2010)
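
As a sketch, this density ratio can be estimated directly from the prior and posterior samples (prior_smp and posterior_smp from the sketch above; density_at() estimates the density of a distribution at a given value):

density_at(prior_smp, 0) / density_at(posterior_smp, 0) # ~ BF10 against the point-null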

My_second_BF <- bayesfactor_parameters(model, null = 0)
My_second_BF
> Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |   BF
> ----------------
> group2    | 1.24
> 
> * Evidence Against The Null: 0
plot(My_second_BF)

Directional hypotheses

We can also compute Bayes factors for directional hypotheses (“one sided”), if we have a prior hypothesis about the direction of the effect. This can be done by setting an order restriction on the prior distribution (which results in an order restriction on the posterior distribution) of the alternative (Morey & Wagenmakers, 2014). For example, if we have a prior hypothesis that the drug has a positive effect on the number of sleep hours, the alternative will be restricted to the region to the right of the null (point or interval):

test_group2_right <- bayesfactor_parameters(model, direction = ">")
test_group2_right
> Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |   BF
> ----------------
> group2    | 2.37
> 
> * Evidence Against The Null: 0
> *                 Direction: Right-Sided test
plot(test_group2_right)

As we can see, given that we have an a priori assumption about the direction of the effect (that the effect is positive), the presence of an effect is about 2.4 times more likely than the absence of an effect, given the observed data (or, the data are about 2.4 times more probable under \(H_1\) than under \(H_0\)). This indicates that, given the observed data and the a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of ~2.4 is still considered quite weak evidence).

Thanks to the flexibility of Bayesian framework, it is also possible to compute a Bayes factor for dividing hypotheses - that is, for a null and alternative that are complementary, opposing one-sided hypotheses (Morey & Wagenmakers, 2014).

For example, above we compared an alternative of \(H_A\): the drug has a positive effect to the null \(H_0\): the drug has no effect. But we can instead compare the same alternative to its complementary hypothesis: \(H_{-A}\): the drug has a negative effect.

test_group2_dividing <- bayesfactor_parameters(model, null = c(-Inf, 0))
test_group2_dividing
> Bayes Factor (Null-Interval)
> 
> Parameter |    BF
> -----------------
> group2    | 20.53
> 
> * Evidence Against The Null: [-Inf, 0.000]
plot(test_group2_dividing)

We can see that this test produces even stronger (more conclusive) evidence than the one-sided vs. point-null test! And indeed, as a rule of thumb, the more specific the two hypotheses are, and the more distinct they are from one another, the more power our Bayes factor has!2

Thanks to the transitivity of Bayes factors, we can also use bayesfactor_parameters() to compare even more types of hypotheses, with some trickery. For example:

\[ \underbrace{BF_{0<b<1\text{ vs. }b=0}}_{\text{range vs. point}} = \underbrace{BF_{b<0\text{ vs. }b=0}}_{\text{directional vs. point}} / \underbrace{BF_{b<0\text{ vs. }0<b<1}}_{\text{directional vs. range}} \]

NOTE: See the Testing Contrasts appendix below.

Support intervals

So far we’ve seen that Bayes factors quantify relative support between competing hypotheses. However, we can also ask:

Upon observing the data, the credibility of which of the parameter’s values has increased (or decreased)?

For example, we’ve seen that the point null has become somewhat less credible after observing the data, but we might also ask: which values have gained credibility given the observed data? The resulting range of values is called the support interval, as it indicates which values are supported by the data (Wagenmakers, Gronau, Dablander, & Etz, 2018). We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities.
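
In the code below, posterior and prior are vectors of posterior and prior samples of the group2 parameter; as a sketch (assuming the sleep model fitted above), they could be extracted like this:

posterior <- insight::get_parameters(model)$group2
prior <- insight::get_parameters(unupdate(model))$group2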

In bayestestR, this can be achieved with the si() function:

my_first_si <- si(
  posterior = data.frame(group2 = posterior),
  prior = data.frame(group2 = prior),
  BF = 1
)

print(my_first_si)
> Support Interval
> 
> Parameter |    BF = 1 SI
> ------------------------
> group2    | [0.15, 3.04]

The argument BF = 1 indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all).

Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased):

plot(my_first_si)

We can also see that this support interval (just barely) excludes the point null (0) - whose credibility, as we’ve already seen, has been decreased by the observed data. This emphasizes the relationship between the support interval and the Bayes factor:

“The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level \(\alpha\). For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against.” (Wagenmakers et al., 2018)

Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent:

  • A \(BF = 1\) contains values whose credibility has merely not decreased by observing the data.
  • A \(BF > 1\) contains values that received more impressive support from the data.
  • A \(BF < 1\) contains values whose credibility has not been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than \(1/BF\) in support of the alternative.

Comparing Models using Bayes Factors

Bayes factors can also be used to compare statistical models. In this statistical context, they answer the following question:

Under which model are the observed data more probable?

In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the relative evidence for one model over the other.

Let’s use Bayes factors for model comparison to find a model that best describes the length of an iris’ sepal using the iris data set.

For Bayesian models (brms and rstanarm)

Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:

  • brmsfit models must have been fitted with save_pars = save_pars(all = TRUE)
  • stanreg models must have been fitted with a defined diagnostic_file.

Let’s first fit 5 Bayesian regressions with brms to predict Sepal.Length:

library(brms)

# intercept only model
m0 <- brm(Sepal.Length ~ 1, data = iris, 
          prior = 
            set_prior("student_t(3, 6, 6)", class = "Intercept") + 
            set_prior("student_t(3, 0, 6)", class = "sigma"),
          save_pars = save_pars(all = TRUE),  backend = "rstan")

# Petal.Length only
m1 <- brm(Sepal.Length ~ Petal.Length, data = iris, 
          prior = 
            set_prior("student_t(3, 6, 6)", class = "Intercept") + 
            set_prior("student_t(3, 0, 6)", class = "sigma") + 
            set_prior("normal(0, 1)", coef = "Petal.Length"),
          save_pars = save_pars(all = TRUE))

# Species only
m2 <- brm(Sepal.Length ~ Species, data = iris,
          prior = 
            set_prior("student_t(3, 6, 6)", class = "Intercept") + 
            set_prior("student_t(3, 0, 6)", class = "sigma") + 
            set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")),
          save_pars = save_pars(all = TRUE))

# Species + Petal.Length model
m3 <- brm(Sepal.Length ~ Species + Petal.Length, data = iris,
          prior = 
            set_prior("student_t(3, 6, 6)", class = "Intercept") + 
            set_prior("student_t(3, 0, 6)", class = "sigma") + 
            set_prior("normal(0, 1)", coef = "Petal.Length") + 
            set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")),
          save_pars = save_pars(all = TRUE))

# full interactive model
m4 <- brm(Sepal.Length ~ Species * Petal.Length, data = iris,
          prior = 
            set_prior("student_t(3, 6, 6)", class = "Intercept") + 
            set_prior("student_t(3, 0, 6)", class = "sigma") + 
            set_prior("normal(0, 1)", coef = "Petal.Length") + 
            set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")) + 
            set_prior("normal(0, 2)", coef = c("Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length")),
          save_pars = save_pars(all = TRUE))

We can now compare these models with the bayesfactor_models() function, using the denominator argument to specify the model against which the rest of the models will be compared (in this case, the intercept-only model):

library(bayestestR)

comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0)
comparison
> Bayes Factors for Model Comparison
> 
>     Model                        BF
> [1] Petal.Length           1.27e+44
> [2] Species                8.34e+27
> [3] Species + Petal.Length 2.29e+53
> [4] Species * Petal.Length 9.79e+51
> 
> * Against Denominator: [5] (Intercept only)
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

We can see that the Species + Petal.Length model is the best model - with \(BF=2\times 10^{53}\) compared to the null (intercept only).

Due to the transitive property of Bayes factors, we can easily change the reference model to the full Species * Petal.Length model:

update(comparison, reference = 4)
> Bayes Factors for Model Comparison
> 
>     Model                        BF
> [1] Petal.Length           1.30e-08
> [2] Species                8.52e-25
> [3] Species + Petal.Length    23.38
> [5] (Intercept only)       1.02e-52
> 
> * Against Denominator: [4] Species * Petal.Length
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

As we can see, the Species + Petal.Length model is also favored compared to the Species * Petal.Length model, though by several orders of magnitude less - it is only supported 23.38 times more!

We can also change the reference model to the Species model:

update(comparison, reference = 2)
> Bayes Factors for Model Comparison
> 
>     Model                        BF
> [1] Petal.Length           1.53e+16
> [3] Species + Petal.Length 2.74e+25
> [4] Species * Petal.Length 1.17e+24
> [5] (Intercept only)       1.20e-28
> 
> * Against Denominator: [2] Species
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

Notice that, in the Bayesian framework, the compared models do not need to be nested, as happened here when we compared the Petal.Length-only model to the Species-only model (something that cannot be done in the frequentist framework, where compared models must be nested in one another).

We can also get a matrix of Bayes factors of all the pairwise model comparisons:

as.matrix(comparison)
> # Bayes Factors for Model Comparison 
> 
> Denominator \ Numerator    |      [1] |      [2] |      [3] |      [4] |      [5]
> ---------------------------------------------------------------------------------
> [1] Petal.Length           |        1 | 6.54e-17 | 1.80e+09 | 7.68e+07 | 7.85e-45
> [2] Species                | 1.53e+16 |        1 | 2.74e+25 | 1.17e+24 | 1.20e-28
> [3] Species + Petal.Length | 5.57e-10 | 3.64e-26 |        1 |    0.043 | 4.37e-54
> [4] Species * Petal.Length | 1.30e-08 | 8.52e-25 |    23.38 |        1 | 1.02e-52
> [5] (Intercept only)       | 1.27e+44 | 8.34e+27 | 2.29e+53 | 9.79e+51 |        1

NOTE: In order to correctly and precisely estimate Bayes Factors, you always need the 4 P’s: Proper Priors,3 and a Plentiful Posterior.4

For Frequentist models via the BIC approximation

It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models (Wagenmakers, 2007).
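
The approximation itself is simple: the Bayes factor of model 1 over model 2 is approximated by \(BF_{12} \approx \exp((BIC_2 - BIC_1)/2)\). As a sketch, for two (hypothetical) fitted frequentist models fit1 and fit0, this would be:

exp((BIC(fit0) - BIC(fit1)) / 2) # ~ BF of fit1 against fit0

(bayestestR also provides bic_to_bf() for this conversion.)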

Let’s try it out on some linear mixed-effects models:

library(lme4)

# define models with increasing complexity
m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris)
m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris)
m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris)
m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris)
m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris)

# model comparison
bayesfactor_models(m1, m2, m3, m4, denominator = m0)
> Bayes Factors for Model Comparison
> 
>      Model                                                       BF
> [m1] Petal.Length + (1 | Species)                          8.24e+24
> [m2] Petal.Length + (Petal.Length | Species)               4.77e+23
> [m3] Petal.Length + Petal.Width + (Petal.Length | Species) 1.52e+22
> [m4] Petal.Length * Petal.Width + (Petal.Length | Species) 5.93e+20
> 
> * Against Denominator: [m0] 1 + (1 | Species)
> *   Bayes Factor Type: BIC approximation

Order restricted models

As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. For example, consider the following model, in which we predict the length of an iris’ sepal from the length of its petal, as well as from its species, with priors:

- \(b_{petal} \sim N(0,2)\)
- \(b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)\)

iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length,
  data = iris,
  prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE),
  refresh = 0
)

These priors are unrestricted - that is, all values between \(-\infty\) and \(\infty\) of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Subsequently, a priori, the parameters relating to the iris species can take any ordering, such that a priori setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa!

Does it make sense to let our priors cover all of these possibilities? That depends on our prior knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be negatively associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica.

These priors can be formulated as restricted priors (Morey, 2015; Morey & Rouder, 2011):

  1. The novice botanist: \(b_{petal} > 0\)
  2. The expert botanist: \(b_{versicolors} > 0\ \&\ b_{virginica} > 0\)

By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. This can be achieved with bayesfactor_restricted(), which computes a Bayes factor for each restricted model vs. the unrestricted model. Let’s first specify these restrictions as logical conditions:

botanist_hypotheses <- c(
  "Petal.Length > 0",
  "(Speciesversicolor > 0) & (Speciesvirginica > 0)"
)

Let’s test these hypotheses:

model_prior <- unupdate(iris_model)

botanist_BFs <- bayesfactor_restricted(
  posterior = iris_model,
  prior = model_prior,
  hypothesis = botanist_hypotheses
)

print(botanist_BFs)
> Bayes Factor (Order-Restriction)
> 
> Hypothesis                                       P(Prior) P(Posterior)       BF
> Petal.Length > 0                                     0.50            1     2.02
> (Speciesversicolor > 0) & (Speciesvirginica > 0)     0.24            0 0.00e+00
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

We can see that the novice botanist’s hypothesis gets a Bayes factor of ~2, indicating the data provide twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length than for a model with no such restriction.

What about our expert botanist? He seems to have failed miserably, with a BF favoring the unrestricted model many, many times over. How is this possible? It seems that when controlling for petal length, versicolor and virginica actually have shorter sepals!

Note that these Bayes factors compare the restricted model to the unrestricted model. If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so:

\[ BF_{\text{restricted vs. NULL}} = \frac {BF_{\text{restricted vs. un-restricted}}} {BF_{\text{un-restricted vs NULL}}} \]

Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (a priori) hypotheses, and should not be used for any post hoc comparisons (Morey, 2015).

NOTE: See the Specifying Correct Priors for Factors with More Than 2 Levels appendix below.

Bayesian Model Averaging

In the previous section, we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider, or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases, we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models.

Inclusion Bayes factors

Inclusion Bayes factors answer the question:

Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?

In other words, on average, are models with predictor \(X\) more likely to have produced the observed data than models without predictor \(X\)?5

Since each model has a prior probability, it is possible to sum the prior probability of all models that include a predictor of interest (the prior inclusion probability), and of all models that do not include that predictor (the prior exclusion probability). After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models’ probabilities to obtain the posterior inclusion probability and the posterior exclusion probability. Once again, the change from prior inclusion odds to the posterior inclusion odds is the Inclusion Bayes factor [“\(BF_{Inclusion}\)”; Clyde, Ghosh, & Littman (2011)].
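
As a sketch with made-up numbers - say there are three equally likely models a priori, the first two of which include the predictor of interest:

p_prior <- c(1, 1, 1) / 3 # prior model probabilities
p_post <- c(0.50, 0.35, 0.15) # posterior model probabilities
has_x <- c(TRUE, TRUE, FALSE) # which models include the predictor

prior_incl_odds <- sum(p_prior[has_x]) / sum(p_prior[!has_x]) # = 2
post_incl_odds <- sum(p_post[has_x]) / sum(p_post[!has_x]) # ~ 5.67
post_incl_odds / prior_incl_odds # inclusion BF, ~ 2.83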

Let’s use the brms example from above:

bayesfactor_inclusion(comparison)
> Inclusion Bayes Factors (Model Averaged)
> 
>                      P(prior) P(posterior) Inclusion BF
> Petal.Length             0.60         1.00     1.91e+25
> Species                  0.60         1.00     1.25e+09
> Petal.Length:Species     0.20         0.04        0.171
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

If we examine the interaction term’s inclusion Bayes factor, we can see that across all 5 models, a model with the term is on average (1/0.171) 5.84 times less supported than a model without the term. Note that Species, a factor represented in the model with several parameters, gets a single Bayes factor - inclusion Bayes factors are given per predictor!

We can also compare only matched models, such that averaging is done only across models that (1) do not include any interactions with the predictor of interest, and (2), for interaction predictors, contain the main effects from which the interaction predictor is comprised (see an explanation for why you might want to do this here).

bayesfactor_inclusion(comparison, match_models = TRUE)
> Inclusion Bayes Factors (Model Averaged)
> 
>                      P(prior) P(posterior) Inclusion BF
> Petal.Length             0.40         0.96     2.74e+25
> Species                  0.40         0.96     1.80e+09
> Petal.Length:Species     0.20         0.04        0.043
> 
> * Compared among: matched models only
> *    Priors odds: uniform-equal

Comparison with JASP

bayesfactor_inclusion() is meant to provide Bayes Factors per predictor, similar to JASP’s Effects option.

Let’s compare the two:

  1. Across all models:
library(BayesFactor)
data(ToothGrowth)
ToothGrowth$dose <- as.factor(ToothGrowth$dose)

BF_ToothGrowth <- anovaBF(len ~ dose * supp, ToothGrowth, progress = FALSE)

bayesfactor_inclusion(BF_ToothGrowth)
> Inclusion Bayes Factors (Model Averaged)
> 
>           P(prior) P(posterior) Inclusion BF
> supp          0.60         1.00       140.99
> dose          0.60         1.00     3.21e+14
> dose:supp     0.20         0.72        10.12
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

  2. Across matched models:
bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE)
> Inclusion Bayes Factors (Model Averaged)
> 
>           P(prior) P(posterior) Inclusion BF
> supp          0.40         0.28        59.19
> dose          0.40         0.28     1.36e+14
> dose:supp     0.20         0.72         2.57
> 
> * Compared among: matched models only
> *    Priors odds: uniform-equal

  3. With Nuisance Effects:

We’ll add dose to the null model in JASP, and do the same in R:

BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4] / BF_ToothGrowth[2] # OR:
# update(bayesfactor_models(BF_ToothGrowth),
#        subset = c(4, 5),
#        reference = 3)
BF_ToothGrowth_against_dose
> Bayes factor analysis
> --------------
> [1] supp + dose             : 59  ±4.5%
> [2] supp + dose + supp:dose : 152 ±1.5%
> 
> Against denominator:
>   len ~ dose 
> ---
> Bayes factor type: BFlinearModel, JZS
bayesfactor_inclusion(BF_ToothGrowth_against_dose)
> Inclusion Bayes Factors (Model Averaged)
> 
>           P(prior) P(posterior) Inclusion BF
> dose          1.00         1.00             
> supp          0.67         1.00       105.74
> dose:supp     0.33         0.72         5.06
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

Averaging posteriors

Similar to how we can average evidence for a predictor across models, we can also average the posterior estimate across models. This is useful in situations where Bayes factors seem to support a null effect, yet the HDI for the alternative excludes the null value (also see si() described above).

For example, looking at the Motor Trend Car Road Tests data (data(mtcars)), we would naturally predict miles/gallon (mpg) from transmission type (am) and weight (wt), but what about the number of carburetors (carb)? Is this a good predictor?

We can determine this by comparing the following models:

mod <- stan_glm(mpg ~ wt + am,
  data = mtcars,
  prior = normal(0, c(10, 10), autoscale = FALSE),
  diagnostic_file = file.path(tempdir(), "df1.csv"),
  refresh = 0
)

mod_carb <- stan_glm(mpg ~ wt + am + carb,
  data = mtcars,
  prior = normal(0, c(10, 10, 20), autoscale = FALSE),
  diagnostic_file = file.path(tempdir(), "df0.csv"),
  refresh = 0
)

BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE)
BF_carb
> Bayes Factors for Model Comparison
> 
>     Model             BF
> [1] wt + am + carb 0.811
> 
> * Against Denominator: [2] wt + am
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

It seems that the model without carb as a predictor is \(1/BF=1.2\) times more likely than the model with carb as a predictor. We might then assume that in the latter model, the HDI will include the point-null value of 0, to also indicate the credibility of the null in the posterior. However, this is not the case:

hdi(mod_carb, ci = .95)
> Highest Density Interval
> 
> Parameter   |        95% HDI
> ----------------------------
> (Intercept) | [28.10, 40.09]
> wt          | [-5.48, -1.72]
> am          | [-0.81,  5.86]
> carb        | [-2.04, -0.38]

How can this be? By estimating the HDI of the effect for carb in the full model, we are acting under the assumption that this model is correct. However, as we’ve just seen, both models are practically tied. If this is the case, why limit our estimation of the effect to just one model? (Bergh, Haaf, Ly, Rouder, & Wagenmakers, 2019)

Using Bayesian Model Averaging, we can combine the posterior samples from several models, weighted by the models’ marginal likelihoods (done via the bayesfactor_models() function). If some parameter is part of some of the models but is missing from others, it is assumed to be fixed at 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI.
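
Conceptually, with the numbers from above (BF = 0.811 and equal prior model odds), the weighting would look something like this sketch:

bf <- 0.811 # BF of mod_carb vs. mod, from above
post_prob <- c(mod = 1, mod_carb = bf) / (1 + bf) # ~ 0.55 and 0.45
round(4000 * post_prob) # number of posterior draws taken from each model
# draws of carb "taken from" mod (where carb is absent) are fixed at 0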

In bayestestR, we can do this with the weighted_posteriors() function:

BMA_draws <- weighted_posteriors(mod, mod_carb)

BMA_hdi <- hdi(BMA_draws, ci = .95)
BMA_hdi
> Highest Density Interval
> 
> Parameter   |        95% HDI
> ----------------------------
> (Intercept) | [29.03, 42.43]
> wt          | [-6.66, -2.14]
> am          | [-2.80,  5.04]
> carb        | [-1.69,  0.00]
plot(BMA_hdi)

We can see that, across both models under consideration, the posterior of the carb effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense, as both models were almost equally supported by the data. We can also see that, across both models, the HDI now does contain 0. Thus, we have resolved the conflict between the Bayes factor and the HDI (Rouder, Haaf, & Vandekerckhove, 2018)!

Note: Parameters might play different roles across different models.

For example, the parameter A plays a different role in the model Y ~ A + B (where it is a main effect) than it does in the model Y ~ A + B + A:B (where it is a simple effect). Centering the predictors (mean-subtracting continuous variables, and effects coding via contr.sum or orthonormal coding via contr.orthonorm for factors) can in some cases reduce this issue.

Appendices

Testing contrasts (with emmeans / modelbased)

Besides testing model parameters, bayesfactor_parameters() can be used to test any estimate that has prior and posterior distributions. One way to achieve this is with a mix of bayesfactor_parameters() + emmeans to test Bayesian contrasts.

For example, in the sleep example from above, we can estimate the group means and the difference between them:

library(emmeans)

(group_diff <- emmeans(model, pairwise ~ group))
> $emmeans
>  group emmean lower.HPD upper.HPD
>  1       0.79     -0.48       2.0
>  2       2.28      1.00       3.5
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95 
> 
> $contrasts
>  contrast estimate lower.HPD upper.HPD
>  1 - 2       -1.47      -3.2     0.223
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95
# pass the original model via prior
bayesfactor_parameters(group_diff, prior = model)
> Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |    BF
> -----------------
> 1         | 0.287
> 2         | 21.37
> 1 - 2     |  1.26
> 
> * Evidence Against The Null: 0

That is evidence for the mean of group 1 being 0 (a BF of 1/0.287, or about 3.5, in favor of the null), strong evidence for the mean of group 2 not being 0, but hardly any evidence for the difference between them not being 0. Conflict? Uncertainty? That is the Bayesian way!

We can also use modelbased, another easystats package, to compute Bayes factors for contrasts:

library(modelbased)

estimate_contrasts(model, test = "bf", bf_prior = model)

NOTE: See the Specifying Correct Priors for Factors with More Than 2 Levels section below.

Specifying correct priors for factors

This section introduces the biased priors obtained when using the common effects factor coding (contr.sum) or dummy factor coding (contr.treatment), and the solution of using orthonormal factor coding (contr.orthonorm) (as outlined in Rouder, Morey, Speckman, & Province, 2012, sec. 7.2).

Special care should be taken when working with factors with 3 or more levels.

Contrasts (and marginal means)

The effects factor coding commonly used in factorial analysis carries a hidden bias when it is applied to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all a priori differences to have the same distribution, but…

For our example, we will be testing all prior pairwise differences between the 3 species in the iris dataset.

df <- iris
contrasts(df$Species) <- contr.sum

fit_sum <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  prior_PD = TRUE, # sample priors
  family = gaussian(),
  refresh = 0
)

(pairs_sum <- pairs(emmeans(fit_sum, ~Species)))
>  contrast               estimate lower.HPD upper.HPD
>  setosa - versicolor      -0.017      -2.8       2.7
>  setosa - virginica       -0.027      -4.0       4.6
>  versicolor - virginica    0.001      -4.2       4.5
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95
ggplot(stack(insight::get_parameters(pairs_sum)), aes(x = values, fill = ind)) +
  geom_density(size = 1) +
  facet_grid(ind ~ .) +
  labs(x = "prior difference values") +
  theme(legend.position = "none")

Notice that, though the prior estimate for all 3 pairwise contrasts is ~0, the scale of the HDI is much narrower for the prior of the setosa - versicolor contrast!

What happened???

This is caused by an inherent bias in the priors introduced by the effects coding (it’s even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect’s parameters). And since it affects the priors, this bias will also distort the Bayes factors, over- or under-stating the evidence for some contrasts over others!

The solution is to use orthonormal factor coding, à la contr.orthonorm. You can either specify this factor coding per factor:

contrasts(df$Species) <- contr.orthonorm

Or you can set it globally:

options(contrasts = c("contr.orthonorm", "contr.poly"))
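
To see what this coding actually does, we can inspect the contrast matrices themselves - in contr.orthonorm, the columns are orthogonal to one another and of equal (unit) length, unlike in contr.sum:

contr.sum(3) # effects coding
contr.orthonorm(3) # orthonormal coding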

Let’s again estimate the prior differences:

fit_bayes <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  prior_PD = TRUE, # sample priors
  family = gaussian(),
  refresh = 0
)

(pairs_bayes <- pairs(emmeans(fit_bayes, ~Species)))
>  contrast               estimate lower.HPD upper.HPD
>  setosa - versicolor       0.000     -2.98      2.67
>  setosa - virginica        0.032     -2.73      2.81
>  versicolor - virginica    0.003     -2.91      2.67
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95
ggplot(stack(insight::get_parameters(pairs_bayes)), aes(x = values, fill = ind)) +
  geom_density(size = 1) +
  facet_grid(ind ~ .) +
  labs(x = "prior difference values") +
  theme(legend.position = "none")

We can see that using this coding scheme, we have equal priors on all pairwise contrasts.

There are other solutions to this problem of priors. You can read about them in Solomon Kurz’s blog post.

Order restrictions

This bias also affects order restrictions involving 3 or more levels. For example, if we want to test an order restriction among A, B, and C, the a priori probability of obtaining the order A > C > B is 1/6 (reach back to intro to stats, year 1), but…

For our example, we will be interested in the following order restrictions in the iris dataset (each line is a separate restriction):

hyp <- c(
  # comparing 2 levels
  "setosa < versicolor",
  "setosa < virginica",
  "versicolor < virginica",

  # comparing 3 (or more) levels
  "setosa    < virginica  & virginica  < versicolor",
  "virginica < setosa     & setosa     < versicolor",
  "setosa    < versicolor & versicolor < virginica"
)

With the default factor coding, this looks like this:

contrasts(df$Species) <- contr.sum
fit_sum <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian()
)

em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means

bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp)
> Bayes Factor (Order-Restriction)
> 
> Hypothesis                                       P(Prior) P(Posterior)       BF
> setosa < versicolor                                  0.51            1     1.97
> setosa < virginica                                   0.49            1     2.02
> versicolor < virginica                               0.49            1     2.03
> setosa    < virginica  & virginica  < versicolor     0.11            0 0.00e+00
> virginica < setosa     & setosa     < versicolor     0.20            0 0.00e+00
> setosa    < versicolor & versicolor < virginica      0.20            1     5.09
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

What happened???

  1. The comparisons of 2 levels all have a prior of ~0.5, as expected.
  2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. some orders are a priori more likely than others!!!

Again, this is solved by using the orthonormal factor coding (from above).

contrasts(df$Species) <- contr.orthonorm
fit_bayes <- stan_glm(Sepal.Length ~ Species,
  data = df,
  prior = normal(0, c(1, 1), autoscale = FALSE),
  family = gaussian()
)
em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means
bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp)
> Bayes Factor (Order-Restriction)
> 
> Hypothesis                                       P(Prior) P(Posterior)       BF
> setosa < versicolor                                  0.49            1     2.06
> setosa < virginica                                   0.49            1     2.03
> versicolor < virginica                               0.51            1     1.96
> setosa    < virginica  & virginica  < versicolor     0.17            0 0.00e+00
> virginica < setosa     & setosa     < versicolor     0.16            0 0.00e+00
> setosa    < versicolor & versicolor < virginica      0.16            1     6.11
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

Conclusion

When comparing the results from the two factor coding schemes, we find:
1. In both cases, the estimated (posterior) means are quite similar (if not identical).
2. The priors and Bayes factors differ between the two schemes.
3. Only with contr.orthonorm are the prior distributions of the differences, or of the orderings, of 3 (or more) means balanced.

References

Bergh, D. van den, Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E.-J. (2019). A cautionary note on estimating effect size.
Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80–101.
Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.
Morey, R. D. (2015). Multiple comparisons with BayesFactor, part 2 – order restrictions. Retrieved from http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html
Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological Methods, 16(4), 406.
Morey, R. D., & Wagenmakers, E.-J. (2014). Simple relation between bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121–124.
Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part IV: Parameter estimation and bayes factors. Psychonomic Bulletin & Review, 25(1), 102–113.
Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). Default bayes factors for ANOVA designs. Journal of Mathematical Psychology, 56(5), 356–374.
Wagenmakers, E.-J. (2007). A practical solution to the pervasive problems of p values. Psychonomic Bulletin & Review, 14(5), 779–804.
Wagenmakers, E.-J., Gronau, Q. F., Dablander, F., & Etz, A. (2018). The support interval. https://doi.org/10.31234/osf.io/zwnxb
Wagenmakers, E.-J., Lodewyckx, T., Kuriyal, H., & Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage–Dickey method. Cognitive Psychology, 60(3), 158–189.

  1. Note that as the width of the null interval shrinks to zero, the prior probability and the posterior probability of the alternative tend towards 1.00.↩︎

  2. For more, see this talk by Richard D. Morey, minute 48↩︎

  3. Robert, 2016; Kass & Raftery, 1993; Fernández, Ley, & Steel, 2001↩︎

  4. Gronau, Singmann, & Wagenmakers, 2017↩︎

  5. A model without predictor \(X\) can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.↩︎


Credible Intervals (CI)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

What is a Credible Interval?

Credible intervals are an important concept in Bayesian statistics. Their core purpose is to describe and summarise the uncertainty related to the unknown parameters you are trying to estimate. In this regard, they could appear quite similar to the frequentist Confidence Intervals. However, while their goals are similar, their statistical definitions and meanings are very different. Indeed, while confidence intervals are obtained through a complex algorithm full of rarely-tested assumptions and approximations, credible intervals are fairly straightforward to compute.

As the Bayesian inference returns a distribution of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95% credible interval is simply the central portion of the posterior distribution that contains 95% of the values.

Note how this drastically improves the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say “given the observed data, the effect has 95% probability of falling within this range”, while the less straightforward, frequentist alternative (the 95% Confidence Interval) would be “there is a 95% probability that when computing a confidence interval from data of this sort, the effect falls within this range.”

89% vs. 95% CI

Using 89% is another popular choice, and it used to be the default for a long time (read the story of the change here). How did it start?

Naturally, when it came about choosing the CI level to report by default, people started using 95%, the arbitrary convention used in the frequentist world. However, some authors suggested that 95% might not be the most appropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn (Kruschke, 2014).

The proposition was to use 90% instead of 95%. However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary thresholds in the first place, why not use 89%? Moreover, 89 is the highest prime number that does not exceed the already unstable 95% threshold. What does it have to do with anything? Nothing, but it reminds us of the total arbitrariness of these conventions (McElreath, 2018).

Thus, CIs computed with 89% intervals (ci = 0.89) are deemed to be more stable than, for instance, 95% intervals (Kruschke, 2014). An effective sample size (ESS; see here) of at least 10,000 is recommended if one wants to compute precise 95% intervals (Kruschke, 2014, p. 183ff). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., rstanarm or brms) is only 4,000 (thus, you might want to increase it when fitting your model).
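
As a sketch, the number of posterior samples can be increased when fitting the model - here with rstanarm, where 4 chains with 2,500 post-warmup draws each yield 10,000 samples:

model <- rstanarm::stan_glm(Sepal.Length ~ Petal.Length, data = iris,
                            chains = 4, iter = 3500, warmup = 1000)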

However, 95% has some advantages too. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the 95% CI than for lower ranges such as 89%), which is a good thing in the context of the reproducibility crisis.

To add to the mess, some other software uses different defaults, such as, for instance, 90%. Ultimately, you as the user should make an informed decision, based on your needs and goals, and justify your choice.

Different types of CIs

The reader might notice that bayestestR provides two methods to compute credible intervals, the Highest Density Interval (HDI) (hdi()) and the Equal-tailed Interval (ETI) (eti()). These methods can also be changed via the method argument of the ci() function. What is the difference? Let’s see:

library(bayestestR)
library(dplyr)
library(ggplot2)

# Generate a normal distribution
posterior <- distribution_normal(1000)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>% 
  estimate_density(extend=TRUE) %>% 
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # Quantile in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)

These are exactly the same…

But is it also the case for other types of distributions?

# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)

# Compute HDI and Quantile CI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>% 
  estimate_density(extend = TRUE) %>% 
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)

The difference is strong with this one.

Contrary to the HDI, for which all points within the interval have a higher probability density than points outside the interval, the ETI is equal-tailed. This means that a 90% interval has 5% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results.

This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution.

On the other hand, the ETI range does not change when transformations are applied to the distribution (for instance, for log-odds to probabilities transformation): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. Thus, for instance, if exponentiated credible intervals are required, it is recommended to calculate the ETI.
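
Because the ETI is defined by quantiles, it can also be computed directly - for instance, for a 90% interval:

quantile(posterior, probs = c(0.05, 0.95))

# equivalent to:
eti(posterior, ci = 0.90)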

The Support Interval

Unlike the HDI and the ETI, which look at the posterior distribution, the Support Interval (SI) provides information regarding the change in the credibility of values from the prior to the posterior - in other words, it indicates which values of a parameter have gained support by the observed data by some factor greater than or equal to k (Wagenmakers, Gronau, Dablander, & Etz, 2018).

prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)

si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)

ggplot(mapping = aes(x = x, y = y)) +
  theme_classic() +
  # The posterior
  geom_area(fill = "orange",
            data = estimate_density(posterior, extend = TRUE)) +
  # The prior
  geom_area(color = "black", fill = NA, size = 1, linetype = "dashed",
            data = estimate_density(prior, extend = TRUE)) +
  # BF = 1 SI in blue
  geom_vline(xintercept = si_1$CI_low, color = "royalblue", size = 1) +
  geom_vline(xintercept = si_1$CI_high, color = "royalblue", size = 1) +
  # BF = 3 SI in red
  geom_vline(xintercept = si_3$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = si_3$CI_high, color = "red", size = 1)

Between the blue lines are values that received some support by the data (this is a \(BF = 1~SI\)), while between the red lines are values that received at least moderate support (\(BF = 3~SI\)) by the data.

From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the Support Interval will yield a Bayes factor smaller than 1/BF.

References

Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press.
McElreath, R. (2018). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman; Hall/CRC.
Wagenmakers, E.-J., Gronau, Q. F., Dablander, F., & Etz, A. (2018). The support interval. https://doi.org/10.31234/osf.io/zwnxb

Get Started with Bayesian Analysis

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Why use the Bayesian Framework?

The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards open and honest science. Reasons to prefer this approach are:

  • reliability (Etz & Vandekerckhove, 2016)
  • accuracy (in noisy data and small samples) (Kruschke, Aguinis, & Joo, 2012)
  • the possibility of introducing prior knowledge into the analysis (Andrews & Baguley, 2013; Kruschke et al., 2012)
  • critically, intuitive nature of results and their straightforward interpretation (Kruschke, 2010; Wagenmakers et al., 2018)

In general, the frequentist approach has been associated with a focus on null hypothesis testing, and the misuse of p-values has been shown to critically contribute to the reproducibility crisis in the social and psychological sciences (Chambers, Feredoes, Muthukumaraswamy, & Etchells, 2014; Szucs & Ioannidis, 2016). There is an emerging consensus that the generalization of the Bayesian approach is one way of overcoming these issues (Benjamin et al., 2018; Etz & Vandekerckhove, 2016).

Once we agree that the Bayesian framework is the right way to go, you might wonder what exactly this framework is.

What’s all the fuss about?

What is the Bayesian Framework?

Adopting the Bayesian framework is more of a shift in the paradigm than a change in the methodology. Indeed, all the common statistical procedures (t-tests, correlations, ANOVAs, regressions, etc.) can be achieved using the Bayesian framework. The key difference is that in the frequentist framework (the “classical” approach to statistics, with p and t values, as well as some weird degrees of freedom), the effects are fixed (but unknown) and data are random. In other words, it assumes that the unknown parameter has a unique value that we are trying to estimate/guess using our sample data. On the other hand, in the Bayesian framework, instead of estimating the “true effect,” the probability of different effects given the observed data is computed, resulting in a distribution of possible values for the parameters, called the posterior distribution.

The uncertainty in Bayesian inference can be summarized, for instance, by the median of the distribution, as well as a range of values of the posterior distribution that includes the 95% most probable values (the 95% credible interval). Cum grano salis, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. To illustrate the difference of interpretation, the Bayesian framework allows us to say “given the observed data, the effect has 95% probability of falling within this range”, while the frequentist (less intuitive) alternative would be “when repeatedly computing confidence intervals from data of this sort, there is a 95% probability that the effect falls within a given range”. In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (the posterior) of an effect that is compatible with the observed data. Thus, an effect can be described by characterizing its posterior distribution in relation to its centrality (point-estimates), uncertainty, as well as its existence and significance.

In other words, putting the maths behind it aside for a moment, we can say that:

  • The frequentist approach tries to estimate the real effect. For instance, the “real” value of the correlation between x and y. Hence, the frequentist models return a point-estimate (i.e., a single value and not a distribution) of the “real” correlation (e.g., \(r = 0.42\)) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a “parent,” usually normal distribution).

  • The Bayesian framework assumes no such thing. The data are what they are. Based on the observed data (and a prior belief about the result), the Bayesian sampling algorithm (MCMC sampling is one example) returns a probability distribution (called the posterior) of the effect that is compatible with the observed data. For the correlation between x and y, it will return a distribution that says, for example, “the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74 with certain probabilities.”

  • To characterize statistical significance of our effects, we do not need p-values, or any other such indices. We simply describe the posterior distribution of the effect. For example, we can report the median, the 89% Credible Interval or other indices (see the sketch below).
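
As a minimal sketch of that workflow (on a simulated posterior; distribution_normal() simply generates a perfect normal sample for illustration):

library(bayestestR)

# A hypothetical posterior distribution for some effect
posterior <- distribution_normal(4000, mean = 0.42, sd = 0.08)

point_estimate(posterior, centrality = "median")  # centrality
hdi(posterior, ci = 0.89)                         # uncertainty
p_direction(posterior)                            # existence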

Accurate depiction of a regular Bayesian user estimating a credible interval.

Note: although the very purpose of this package is to advocate for the use of Bayesian statistics, there are serious arguments supporting frequentist indices (see for instance this thread). As always, the world is not black and white (p < .001).

So… how does it work?

A simple example

bayestestR installation

You can install bayestestR along with the whole easystats suite by running the following:

install.packages("remotes")
remotes::install_github("easystats/easystats")

Let’s also install and load the rstanarm package, which allows fitting Bayesian models, as well as bayestestR, to describe them.

install.packages("rstanarm")
library(rstanarm)

Traditional linear regression

Let’s start by fitting a simple frequentist linear regression (the lm() function stands for linear model) between two numeric variables, Sepal.Length and Petal.Length from the famous iris dataset, included by default in R.

model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)

Call:
lm(formula = Sepal.Length ~ Petal.Length, data = iris)

Residuals:
    Min      1Q  Median      3Q     Max 
-1.2468 -0.2966 -0.0152  0.2768  1.0027 

Coefficients:
             Estimate Std. Error t value Pr(>|t|)    
(Intercept)    4.3066     0.0784    54.9   <2e-16 ***
Petal.Length   0.4089     0.0189    21.6   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.41 on 148 degrees of freedom
Multiple R-squared:  0.76,  Adjusted R-squared:  0.758 
F-statistic:  469 on 1 and 148 DF,  p-value: <2e-16

This analysis suggests that there is a statistically significant (whatever that means) and positive (with a coefficient of 0.41) linear relationship between the two variables.

Fitting and interpreting the frequentist models is so easy that it is obvious that people use it instead of the Bayesian framework… right?

Not anymore.

Bayesian linear regression

model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)
posteriors <- describe_posterior(model)
# for a nicer table
print_md(posteriors, digits = 2)
Summary of Posterior Distribution
Parameter    | Median |       95% CI | pd   |          ROPE | % in ROPE | Rhat  |     ESS
-------------|--------|--------------|------|---------------|-----------|-------|--------
(Intercept)  |   4.30 | [4.15, 4.46] | 100% | [-0.08, 0.08] |        0% | 1.000 | 4057.00
Petal.Length |   0.41 | [0.37, 0.45] | 100% | [-0.08, 0.08] |        0% | 1.000 | 4115.00

That’s it!

You just fitted a Bayesian version of the model by simply using the stan_glm() function instead of lm() and described the posterior distributions of the parameters!

The conclusions we draw, for this example, are very similar. The effect (the median of the effect’s posterior distribution) is about 0.41, and it can also be considered significant in the Bayesian sense (more on that later).
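
To see what describe_posterior() is summarising, we can extract the posterior draws ourselves and compute the median by hand (a sketch, assuming the model fitted above):

library(insight)

draws <- get_parameters(model)  # one column of posterior draws per parameter
median(draws$Petal.Length)      # should be close to the reported 0.41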

So, ready to learn more?

Check out the next tutorial!

And, if you want even more, you can check out other articles describing all the functionality the package has to offer!

https://easystats.github.io/bayestestR/articles/

References

Andrews, M., & Baguley, T. (2013). Prior approval: The growth of Bayesian methods in psychology. British Journal of Mathematical and Statistical Psychology, 66(1), 1–7.
Benjamin, D. J., Berger, J. O., Johannesson, M., Nosek, B. A., Wagenmakers, E.-J., Berk, R., … others. (2018). Redefine statistical significance. Nature Human Behaviour, 2(1), 6.
Chambers, C. D., Feredoes, E., Muthukumaraswamy, S. D., & Etchells, P. (2014). Instead of ’playing the game’ it is time to change the rules: Registered Reports at AIMS Neuroscience and beyond. AIMS Neuroscience, 1(1), 4–17.
Etz, A., & Vandekerckhove, J. (2016). A Bayesian perspective on the reproducibility project: Psychology. PloS One, 11(2), e0149794.
Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.
Kruschke, J. K., Aguinis, H., & Joo, H. (2012). The time has come: Bayesian methods for data analysis in the organizational sciences. Organizational Research Methods, 15(4), 722–752.
Szucs, D., & Ioannidis, J. P. (2016). Empirical assessment of published effect sizes and power in the recent cognitive neuroscience and psychology literature. BioRxiv, 071530.
Wagenmakers, E.-J., Marsman, M., Jamil, T., Ly, A., Verhagen, J., Love, J., … others. (2018). Bayesian inference for psychology. Part I: Theoretical advantages and practical ramifications. Psychonomic Bulletin & Review, 25(1), 35–57.

Reporting Guidelines

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

Reporting Guidelines

How to describe and report the parameters of a model

A Bayesian analysis returns a posterior distribution for each parameter (or effect). To minimally describe these distributions, we recommend reporting a point-estimate of centrality as well as information characterizing the estimation uncertainty (the dispersion). Additionally, one can also report indices of effect existence and/or significance.

Based on the previous comparison of point-estimates and indices of effect existence, we can draw the following recommendations.

Centrality

We suggest reporting the median as an index of centrality, as it is more robust than the mean or the MAP estimate. However, in the case of a severely skewed posterior distribution, the MAP estimate could be a good alternative.
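
For instance, here is a minimal sketch comparing the three indices on a simulated skewed posterior:

library(bayestestR)

posterior <- distribution_beta(1000, 2, 8)  # a right-skewed posterior
point_estimate(posterior, centrality = c("median", "mean", "MAP"))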

Uncertainty

The 95% or 89% Credible Intervals (CI) are two reasonable ranges to characterize the uncertainty related to the estimation (see here for a discussion about the differences between these two values). We also recommend computing the CIs based on the HDI rather than quantiles, favouring probable over central values.

Note that a CI based on the quantile (equal-tailed interval) might be more appropriate in case of transformations (for instance when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see here).
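
As a quick sketch of the difference between the two (on a simulated skewed posterior, where they diverge):

library(bayestestR)

posterior <- distribution_beta(1000, 6, 2)  # a skewed posterior
hdi(posterior, ci = 0.89)  # interval of the most probable values
eti(posterior, ci = 0.89)  # equal-tailed (quantile-based) interval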

Existence

Reviewer 2 (circa a long time ago in a galaxy far away).

The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect existence and significance. The most straightforward index to describe existence of an effect is the Probability of Direction (pd), representing the certainty associated with the most probable direction (positive or negative) of the effect. This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics, and independent from the scale of the data.

Moreover, it is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers unfamiliar with Bayesian statistics. A two-sided p-value of .1, .05, .01, and .001 corresponds approximately to a pd of 95%, 97.5%, 99.5%, and 99.95%, respectively (a conversion sketch is given after the list below).

Thus, for convenience, we suggest the following reference values as interpretation helpers:

  • pd <= 95% ~ p > .1: uncertain
  • pd > 95% ~ p < .1: possibly existing
  • pd > 97%: likely existing
  • pd > 99%: probably existing
  • pd > 99.9%: certainly existing
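
As a sketch of the pd-p correspondence mentioned above, bayestestR ships conversion helpers:

library(bayestestR)

pd_to_p(c(0.95, 0.975, 0.995, 0.9995))  # approximately .1, .05, .01, .001
p_to_pd(0.05)                           # and back again: 0.975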

Significance

The percentage in ROPE is an index of significance (in its primary meaning), informing us whether a parameter is related or not to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the percentage of the full posterior distribution (the full ROPE) instead of a given proportion of CI in the ROPE, which appears to be more sensitive (especially to delineate highly significant effects). Rather than using it as a binary, all-or-nothing decision criterion, as suggested by the original equivalence test, we recommend using the percentage as a continuous index of significance (a minimal rope() sketch is given below). However, based on simulation data, we suggest the following reference values as interpretation helpers:

  • > 99% in ROPE: negligible (we can accept the null hypothesis)
  • > 97.5% in ROPE: probably negligible
  • <= 97.5% & >= 2.5% in ROPE: undecided significance
  • < 2.5% in ROPE: probably significant
  • < 1% in ROPE: significant (we can reject the null hypothesis)

Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see here).
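
As a minimal sketch (on a simulated posterior), the percentage of the full posterior distribution within a ROPE can be obtained with rope() and ci = 1; for models, rope_range() provides a default ROPE:

library(bayestestR)

posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3)
rope(posterior, range = c(-0.1, 0.1), ci = 1)  # % of the whole posterior in the ROPE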

Template Sentence

Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be:

“the effect of X has a probability of pd of being negative (Median = median, 89% CI [HDIlow, HDIhigh]) and can be considered as significant (ROPE% in ROPE).”

How to compare different models

Although it can also be used to assess effect existence and significance, the Bayes factor (BF) is a versatile index that can be used to directly compare different models (or data generation processes). The Bayes factor is a ratio that informs us by how much more (or less) likely the observed data are under two compared models - usually a model with versus a model without the effect. Depending on the specifications of the null model (whether it is a point-estimate (e.g., 0) or an interval), the Bayes factor could be used both in the context of effect existence and significance.

In general, a Bayes factor greater than 1 is taken as evidence in favour of one of the models (the one in the numerator), and a Bayes factor smaller than 1 is taken as evidence in favour of the other model (in the denominator). Several rules of thumb exist to help the interpretation (see here), with > 3 being one common threshold to categorize non-anecdotal evidence (see the sketch below).
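
For instance, here is a minimal sketch comparing a model with the effect against a null model; bayesfactor_models() supports frequentist models via a BIC-based approximation of the Bayes factor:

library(bayestestR)

m0 <- lm(Sepal.Length ~ 1, data = iris)             # null model
m1 <- lm(Sepal.Length ~ Petal.Length, data = iris)  # model with the effect

bayesfactor_models(m1, denominator = m0)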

Template Sentence

When reporting Bayes factors (BF), one can use the following sentence:

“There is moderate evidence in favour of an absence of effect of x (BF = BF).”

Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a discussion thread or making a pull request.

bayestestR/inst/doc/example2.Rmd

---
title: "2. Confirmation of Bayesian skills"
output: 
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{2. Confirmation of Bayesian skills}
  %\VignetteEngine{knitr::rmarkdown}
editor_options: 
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r , include=FALSE}
if (!requireNamespace("see", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("performance", quietly = TRUE) ||
  !requireNamespace("BayesFactor", quietly = TRUE) ||
  !requireNamespace("rstanarm", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

data(iris)
library(knitr)
library(bayestestR)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)
```

Now that [**describing and understanding posterior distributions**](https://easystats.github.io/bayestestR/articles/example1.html) of linear regressions is not that mysterious to you, we will take one step back and study some simpler models: **correlations** and ***t*-tests**.

But before we do that, let us take a moment to remind ourselves and appreciate the fact that **all basic statistical procedures** such as correlations, *t*-tests, ANOVAs, or chi-square tests **are** linear regressions (we strongly recommend [this excellent demonstration](https://lindeloev.github.io/tests-as-linear/)). Nevertheless, these simple models will provide a good pretext to introduce a few more complex indices, such as the **Bayes factor**.

## Correlations

### Frequentist version

Once again, let us begin with a **frequentist correlation** between two continuous variables, the **width** and the **length** of the sepals of some flowers. The data is available in `R` as the `iris` dataset (the same that was used in the [previous tutorial](https://easystats.github.io/bayestestR/articles/example1.html)).

We will compute a Pearson's correlation test, store the results in an object called `result`, and then display it:

```{r}
result <- cor.test(iris$Sepal.Width, iris$Sepal.Length)
result
```

As you can see in the output, the test actually compared **two** hypotheses:

- the **null hypothesis** (*h0*; no correlation),
- the **alternative hypothesis** (*h1*; a non-null correlation).

Based on the *p*-value, the null hypothesis cannot be rejected: the correlation between the two variables is **negative but non-significant** ($r = -.12, p > .05$).

### Bayesian correlation

To compute a Bayesian correlation test, we will need the [`BayesFactor`](https://richarddmorey.github.io/BayesFactor/) package (you can install it by running `install.packages("BayesFactor")`). We can then load this package, compute the correlation using the `correlationBF()` function, and store the result.
```{r, results='hide'}
library(BayesFactor)
result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length)
```

Now, let us run our `describe_posterior()` function on that:

```{r }
describe_posterior(result)
```

We see again many things here, but the important index for now is the **median** of the posterior distribution, `-.11`. This is (again) quite close to the frequentist correlation. We could, as previously, describe the [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html), the [**pd**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) or the [**ROPE percentage**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), but we will focus here on another index provided by the Bayesian framework, the **Bayes Factor (BF)**.

### Bayes Factor (BF)

We said previously that a correlation test actually compares two hypotheses, a null hypothesis (absence of an effect) against an alternative one (presence of an effect). The [**Bayes factor (BF)**](https://easystats.github.io/bayestestR/articles/bayes_factors.html) allows the same comparison and determines **under which of these two models the observed data are more probable**: a model with the effect of interest, and a null model without the effect of interest. So, in the context of our correlation example, the null hypothesis would be no correlation between the two variables ($h0: \rho = 0$; where $\rho$ stands for the Bayesian correlation coefficient), while the alternative hypothesis would be that there is a correlation **different** from 0 - positive or negative ($h1: \rho \neq 0$).

We can use `bayesfactor()` to specifically compute the Bayes factor comparing those models:

```{r}
bayesfactor(result)
```

We got a *BF* of `0.51`. What does it mean?

Bayes factors are **continuous measures of *relative* evidence**, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as *the numerator*), and a Bayes factor smaller than 1 giving evidence in favour of the other model (*the denominator*).

> **Yes, you heard that right, evidence in favour of the *null*!**

That's one of the reasons why the Bayesian framework is sometimes considered superior to the frequentist framework. Remember from your stats lessons that the ***p*-value can only be used to reject *h0***, but not to *accept* it. With the **Bayes factor**, you can measure **evidence against - and in favour of - the null**. In other words, in the frequentist framework, if the *p*-value is not significant, we can conclude that **evidence for the effect is absent**, but not that there is **evidence for the absence of the effect**. In the Bayesian framework, we can do the latter. This is important since sometimes our hypotheses are about no effect.

BFs representing evidence for the alternative against the null can be reversed using $BF_{01}=1/BF_{10}$ (the *01* and *10* correspond to *h0* against *h1* and *h1* against *h0*, respectively) to provide evidence of the null against the alternative. This improves human readability^[If the effect is really strong, the BF values can be extremely high. So don't be surprised if you see BF values that have been log-transformed to make them more human readable.] in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null).
In our case, `BF = 1/0.51 = 2` indicates that the data are **2 times more probable under the null compared to the alternative hypothesis**, which, though favouring the null, is considered only [anecdotal evidence against the null](https://easystats.github.io/effectsize/reference/interpret_bf.html).

We can thus conclude that there is **anecdotal evidence in favour of an absence of correlation between the two variables (r median = -0.11, BF = 0.51)**, which is a much more informative statement than what we can do with frequentist statistics.

**And that's not all!**

### Visualise the Bayes factor

In general, **pie charts are an absolute no-go in data visualisation**, as our brain's perceptive system heavily distorts the information presented in such a way^[An exception would be when the pie slices are well-labeled so that our brain's perception system does not have to do the decoding work.]. Nevertheless, there is one exception: pizza charts.

It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise.

```{r echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. From the great blog.", fig.align='center', out.width="80%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg")
```

Such "pizza plots" can be directly created through the [`see`](https://github.com/easystats/see) visualisation companion package for `easystats` (you can install it by running `install.packages("see")`):

```{r }
library(see)

plot(bayesfactor(result)) +
  scale_fill_pizza()
```

So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke?

## *t*-tests

> **"I know that I know nothing, and especially not if *versicolor* and *virginica* differ in terms of their Sepal.Width" - Socrates**.

Time to finally answer this crucial question!

### Versicolor *vs.* virginica

Bayesian *t*-tests can be performed in a very similar way to correlations. As we are particularly interested in two levels of the `Species` factor, *versicolor* and *virginica*, we will start by filtering out from `iris` the non-relevant observations corresponding to the *setosa* species, and we will then visualise the observations and the distribution of the `Sepal.Width` variable.

```{r }
library(dplyr)
library(ggplot2)

# Select only two relevant species
data <- iris %>%
  filter(Species != "setosa") %>%
  droplevels()

# Visualise distributions and observations
data %>%
  ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) +
  geom_violindot(fill_dots = "black", size_dots = 1) +
  scale_fill_material() +
  theme_modern()
```

It *seems* (visually) that *virginica* flowers have, on average, a slightly higher width of sepals. Let's assess this difference statistically by using the `ttestBF()` function in the `BayesFactor` package.

### Compute the Bayesian *t*-test

```{r}
result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data)
describe_posterior(result)
```

From the indices, we can say that the difference of `Sepal.Width` between *virginica* and *versicolor* has a probability of **100% of being negative** [*from the pd and the sign of the median*] (median = -0.19, 89% CI [-0.29, -0.092]). The data provides **strong evidence against the null hypothesis** (BF = 18).

Keep that in mind as we will see another way of investigating this question.

## Logistic Model

A hypothesis for which one uses a *t*-test can also be tested using a binomial model (*e.g.*, a **logistic model**).
Indeed, it is possible to reformulate the following hypothesis, "*there is an important difference in this variable between the two groups*" with the hypothesis "*this variable is able to discriminate between (or classify) the two groups*". However, these models are much more powerful than a *t*-test.

In the case of the difference of `Sepal.Width` between *virginica* and *versicolor*, the question becomes, *how well can we classify the two species using only* `Sepal.Width`.

### Fit the model

```{r}
library(rstanarm)

model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0)
```

### Visualise the model

Using the [`modelbased`](https://github.com/easystats/modelbased) package.

```{r}
library(modelbased)

vizdata <- estimate_relation(model)

ggplot(vizdata, aes(x = Sepal.Width, y = Predicted)) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high), alpha = 0.5) +
  geom_line() +
  ylab("Probability of being virginica") +
  theme_modern()
```

### Performance and Parameters

Once again, we can extract all indices of interest for the posterior distribution using our old pal `describe_posterior()`.

```{r}
describe_posterior(model, test = c("pd", "ROPE", "BF"))
```

```{r}
library(performance)

model_performance(model)
```

### Visualise the indices

TO DO.

```{r }
library(see)

plot(rope(result))
```

### Diagnostic Indices

About diagnostic indices such as Rhat and ESS.

bayestestR/inst/doc/example2.R

## ---- include=FALSE----------------------------------------------------------
if (!requireNamespace("see", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("performance", quietly = TRUE) ||
  !requireNamespace("BayesFactor", quietly = TRUE) ||
  !requireNamespace("rstanarm", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

data(iris)
library(knitr)
library(bayestestR)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)

## -----------------------------------------------------------------------------
result <- cor.test(iris$Sepal.Width, iris$Sepal.Length)
result

## ---- results='hide'-----------------------------------------------------------
library(BayesFactor)
result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length)

## -----------------------------------------------------------------------------
describe_posterior(result)

## -----------------------------------------------------------------------------
bayesfactor(result)

## ----echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. From the great blog.", fig.align='center', out.width="80%"----
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg")

## -----------------------------------------------------------------------------
library(see)

plot(bayesfactor(result)) +
  scale_fill_pizza()

## -----------------------------------------------------------------------------
library(dplyr)
library(ggplot2)

# Select only two relevant species
data <- iris %>%
  filter(Species != "setosa") %>%
  droplevels()

# Visualise distributions and observations
data %>%
  ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) +
  geom_violindot(fill_dots = "black", size_dots = 1) +
  scale_fill_material() +
  theme_modern()

## -----------------------------------------------------------------------------
result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data)
describe_posterior(result)

## -----------------------------------------------------------------------------
library(rstanarm)

model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0)

## -----------------------------------------------------------------------------
library(modelbased)

vizdata <- estimate_relation(model)

ggplot(vizdata, aes(x = Sepal.Width, y = Predicted)) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high), alpha = 0.5) +
  geom_line() +
  ylab("Probability of being virginica") +
  theme_modern()

## -----------------------------------------------------------------------------
describe_posterior(model, test = c("pd", "ROPE", "BF"))

## -----------------------------------------------------------------------------
library(performance)
model_performance(model)

## -----------------------------------------------------------------------------
library(see)
plot(rope(result))

Mediation Analysis using Bayesian Regression Models

This vignette demonstrates the mediation()-function. Before we start, we fit some models, including a mediation-object from the mediation-package and a structural equation modelling approach with the lavaan-package, both of which we use for comparison with brms and rstanarm.

Mediation Analysis in brms and rstanarm

library(bayestestR)
library(mediation)
library(brms)
library(rstanarm)

# load sample data
data(jobs)

set.seed(123)
# linear models, for mediation analysis
b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs)
b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs)

# mediation analysis, for comparison with brms
m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek")
# Fit Bayesian mediation model in brms
f1 <- bf(job_seek ~ treat + econ_hard + sex + age)
f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age)
m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4)
# Fit Bayesian mediation model in rstanarm
m3 <- stan_mvmer(
  list(job_seek ~ treat + econ_hard + sex + age + (1 | occp),
       depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp)),
  data = jobs,
  cores = 4,
  refresh = 0
)

mediation() is a summary function, especially for mediation analysis, i.e. for multivariate response models with causal mediation effects.

In the models m2 and m3, treat is the treatment effect and job_seek is the mediator effect. For the brms model (m2), f1 describes the mediator model and f2 describes the outcome model. This is similar for the rstanarm model.

mediation() returns a data frame with information on the direct effect (median value of posterior samples from treatment of the outcome model), mediator effect (median value of posterior samples from mediator of the outcome model), indirect effect (median value of the multiplication of the posterior samples from mediator of the outcome model and the posterior samples from treatment of the mediation model) and the total effect (median value of sums of posterior samples used for the direct and indirect effect). The proportion mediated is the indirect effect divided by the total effect.
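
These quantities can also be recovered by hand from the posterior draws. Here is a minimal sketch for the brms model m2; the parameter names are an assumption based on brms's usual naming for multivariate models (underscores are stripped from response names, so the mediator model's coefficients would be prefixed with b_jobseek_):

draws <- insight::get_parameters(m2)

a <- draws[["b_jobseek_treat"]]      # treatment -> mediator path
b <- draws[["b_depress2_job_seek"]]  # mediator -> outcome path
ade <- draws[["b_depress2_treat"]]   # direct effect (ADE)

median(a * b)        # indirect effect (ACME)
median(ade + a * b)  # total effect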

The simplest call just needs the model-object.

# for brms
mediation(m2)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |          95% ETI
#> ----------------------------------------------------
#> Direct Effect (ADE)    |   -0.040 | [-0.124,  0.046]
#> Indirect Effect (ACME) |   -0.015 | [-0.041,  0.008]
#> Mediator Effect        |   -0.240 | [-0.294, -0.185]
#> Total Effect           |   -0.055 | [-0.145,  0.034]
#> 
#> Proportion mediated: 28.14% [-181.46%, 237.75%]

# for rstanarm
mediation(m3)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |          95% ETI
#> ----------------------------------------------------
#> Direct Effect (ADE)    |   -0.040 | [-0.129,  0.048]
#> Indirect Effect (ACME) |   -0.018 | [-0.042,  0.006]
#> Mediator Effect        |   -0.241 | [-0.296, -0.187]
#> Total Effect           |   -0.057 | [-0.151,  0.033]
#> 
#> Proportion mediated: 30.59% [-221.09%, 282.26%]

Typically, mediation() finds the treatment and mediator variables automatically. If this does not work, use the treatment and mediator arguments to specify the related variable names. For all values, the 95% credible intervals are calculated by default. Use ci to calculate a different interval.
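
For instance, a sketch with the brms model from above, making everything explicit:

mediation(m2, treatment = "treat", mediator = "job_seek", ci = 0.89)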

Comparison to the mediation package

Here is a comparison with the mediation package. Note that the summary()-output of the mediation package shows the indirect effect first, followed by the direct effect.

summary(m1)
#> 
#> Causal Mediation Analysis 
#> 
#> Quasi-Bayesian Confidence Intervals
#> 
#>                Estimate 95% CI Lower 95% CI Upper p-value
#> ACME            -0.0157      -0.0387         0.01    0.19
#> ADE             -0.0438      -0.1315         0.04    0.35
#> Total Effect    -0.0595      -0.1530         0.02    0.21
#> Prop. Mediated   0.2137      -2.0277         2.70    0.32
#> 
#> Sample Size Used: 899 
#> 
#> 
#> Simulations: 1000

mediation(m2, ci = .95)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |          95% ETI
#> ----------------------------------------------------
#> Direct Effect (ADE)    |   -0.040 | [-0.124,  0.046]
#> Indirect Effect (ACME) |   -0.015 | [-0.041,  0.008]
#> Mediator Effect        |   -0.240 | [-0.294, -0.185]
#> Total Effect           |   -0.055 | [-0.145,  0.034]
#> 
#> Proportion mediated: 28.14% [-181.46%, 237.75%]

mediation(m3, ci = .95)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |          95% ETI
#> ----------------------------------------------------
#> Direct Effect (ADE)    |   -0.040 | [-0.129,  0.048]
#> Indirect Effect (ACME) |   -0.018 | [-0.042,  0.006]
#> Mediator Effect        |   -0.241 | [-0.296, -0.187]
#> Total Effect           |   -0.057 | [-0.151,  0.033]
#> 
#> Proportion mediated: 30.59% [-221.09%, 282.26%]

If you want to calculate mean instead of median values from the posterior samples, use the centrality-argument. Furthermore, there is a print()-method, which allows printing more digits.

m <- mediation(m2, centrality = "mean", ci = .95)
print(m, digits = 4)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |            95% ETI
#> ------------------------------------------------------
#> Direct Effect (ADE)    |  -0.0395 | [-0.1237,  0.0456]
#> Indirect Effect (ACME) |  -0.0158 | [-0.0405,  0.0083]
#> Mediator Effect        |  -0.2401 | [-0.2944, -0.1846]
#> Total Effect           |  -0.0553 | [-0.1454,  0.0341]
#> 
#> Proportion mediated: 28.60% [-181.01%, 238.20%]

As you can see, the results are similar to what the mediation package produces for non-Bayesian models.

Comparison to SEM from the lavaan package

Finally, we also compare the results to a SEM model, using lavaan. This example should demonstrate how to “translate” the same model across different packages and modelling approaches.

library(lavaan)
data(jobs)
set.seed(1234)

model <- ' # direct effects
             depress2 ~ c1*treat + c2*econ_hard + c3*sex + c4*age + b*job_seek
             
           # mediation
             job_seek ~ a1*treat + a2*econ_hard + a3*sex + a4*age
             
           # indirect effects (a*b)
             indirect_treat := a1*b
             indirect_econ_hard := a2*b
             indirect_sex := a3*b
             indirect_age := a4*b
             
           # total effects
             total_treat := c1 + (a1*b)
             total_econ_hard := c2 + (a2*b)
             total_sex := c3 + (a3*b)
             total_age := c4 + (a4*b)
         '
m4 <- sem(model, data = jobs)
summary(m4)
#> lavaan 0.6-9 ended normally after 25 iterations
#> 
#>   Estimator                                         ML
#>   Optimization method                           NLMINB
#>   Number of model parameters                        11
#>                                                       
#>   Number of observations                           899
#>                                                       
#> Model Test User Model:
#>                                                       
#>   Test statistic                                 0.000
#>   Degrees of freedom                                 0
#> 
#> Parameter Estimates:
#> 
#>   Standard errors                             Standard
#>   Information                                 Expected
#>   Information saturated (h1) model          Structured
#> 
#> Regressions:
#>                    Estimate  Std.Err  z-value  P(>|z|)
#>   depress2 ~                                          
#>     treat     (c1)   -0.040    0.043   -0.929    0.353
#>     econ_hard (c2)    0.149    0.021    7.156    0.000
#>     sex       (c3)    0.107    0.041    2.604    0.009
#>     age       (c4)    0.001    0.002    0.332    0.740
#>     job_seek   (b)   -0.240    0.028   -8.524    0.000
#>   job_seek ~                                          
#>     treat     (a1)    0.066    0.051    1.278    0.201
#>     econ_hard (a2)    0.053    0.025    2.167    0.030
#>     sex       (a3)   -0.008    0.049   -0.157    0.875
#>     age       (a4)    0.005    0.002    1.983    0.047
#> 
#> Variances:
#>                    Estimate  Std.Err  z-value  P(>|z|)
#>    .depress2          0.373    0.018   21.201    0.000
#>    .job_seek          0.524    0.025   21.201    0.000
#> 
#> Defined Parameters:
#>                    Estimate  Std.Err  z-value  P(>|z|)
#>     indirect_treat   -0.016    0.012   -1.264    0.206
#>     indirct_cn_hrd   -0.013    0.006   -2.100    0.036
#>     indirect_sex      0.002    0.012    0.157    0.875
#>     indirect_age     -0.001    0.001   -1.932    0.053
#>     total_treat      -0.056    0.045   -1.244    0.214
#>     total_econ_hrd    0.136    0.022    6.309    0.000
#>     total_sex         0.109    0.043    2.548    0.011
#>     total_age        -0.000    0.002   -0.223    0.824

# just to have the numbers right at hand and you don't need to scroll up
mediation(m2, ci = .95)
#> # Causal Mediation Analysis for Stan Model
#> 
#>   Treatment: treat
#>   Mediator : job_seek
#>   Response : depress2
#> 
#> Effect                 | Estimate |          95% ETI
#> ----------------------------------------------------
#> Direct Effect (ADE)    |   -0.040 | [-0.124,  0.046]
#> Indirect Effect (ACME) |   -0.015 | [-0.041,  0.008]
#> Mediator Effect        |   -0.240 | [-0.294, -0.185]
#> Total Effect           |   -0.055 | [-0.145,  0.034]
#> 
#> Proportion mediated: 28.14% [-181.46%, 237.75%]

The summary output from lavaan is longer, but we can find the related numbers quite easily (a quick arithmetic check follows the list):

  • the direct effect of treatment is treat (c1), which is -0.040
  • the indirect effect of treatment is indirect_treat, which is -0.016
  • the mediator effect of job_seek is job_seek (b), which is -0.240
  • the total effect is total_treat, which is -0.056
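
As that check of the product-of-coefficients logic behind these numbers: the indirect effect is the product of the two paths, a1 × b = 0.066 × (-0.240) ≈ -0.016, and the total effect is the direct effect plus the indirect effect, -0.040 + (-0.016) ≈ -0.056, both matching the values above.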

In-Depth 2: Comparison of Indices of Effect Existence and Significance

This vignette can be referred to by citing the following:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

Indices of Effect Existence and Significance in the Bayesian Framework

A comparison of different Bayesian indices (pd, BFs, ROPE etc.) is accessible here.

But, in case you don’t wish to read the full article, the following table summarizes the key takeaways!

Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a discussion thread or making a pull request.

bayestestR/inst/doc/region_of_practical_equivalence.R

## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
if (!requireNamespace("rstanarm", quietly = TRUE) ||
  !requireNamespace("see", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)
set.seed(333)

## ----message=FALSE, warning=FALSE, eval=FALSE----------------------------------
#  library(rstanarm)
#  library(bayestestR)
#  library(see)
#  
#  data <- iris # Use the iris data
#  model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data) # Fit model

## ----echo=FALSE, message=FALSE, warning=FALSE, comment=">"---------------------
library(rstanarm)
library(bayestestR)
library(see)
set.seed(333)
data <- iris # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data, refresh = 0)

## ----echo=TRUE, message=FALSE, warning=FALSE, comment=">"----------------------
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci=1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope

## ----message=FALSE, warning=FALSE, eval=FALSE----------------------------------
#  data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100
#  model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data) # Fit model

## ----echo=FALSE, message=FALSE, warning=FALSE, comment=">"---------------------
set.seed(333)
data$Sepal.Width_scaled <- data$Sepal.Width / 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data, refresh = 0)

## ----echo=TRUE, message=FALSE, warning=FALSE, comment=">"----------------------
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci=1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope

bayestestR/inst/doc/credible_interval.Rmd

---
title: "Credible Intervals (CI)"
output: 
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test, ci, credible interval]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Credible Intervals (CI)}
  %\VignetteEngine{knitr::rmarkdown}
editor_options: 
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)

if (!requireNamespace("ggplot2", quietly = TRUE) ||
  !requireNamespace("logspline", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

set.seed(333)
```

# What is a *Credible* Interval?

Credible intervals are an important concept in Bayesian statistics. Their core purpose is to describe and summarise **the uncertainty** related to the unknown parameters you are trying to estimate. In this regard, they could appear as quite similar to the frequentist **Confidence Intervals**. However, while their goal is similar, **their statistical definition and meaning is very different**. Indeed, while the latter is obtained through a complex algorithm full of rarely-tested assumptions and approximations, credible intervals are fairly straightforward to compute.

As Bayesian inference returns a **distribution** of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95\% credible interval is simply the central portion of the posterior distribution that contains 95\% of the values.

Note how this drastically improves the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say *"given the observed data, the effect has 95% probability of falling within this range"*, whereas the less straightforward frequentist alternative (the 95\% **Confidence Interval**) would be "*there is a 95\% probability that when computing a confidence interval from data of this sort, the effect falls within this range*".

# 89\% vs. 95\% CI
Using 89\% is another popular choice, and used to be the default for a long time ([read here the story of the change](https://github.com/easystats/bayestestR/discussions/250)). How did it start?

Naturally, when it came to choosing the CI level to report by default, **people started using 95\%**, the arbitrary convention used in the **frequentist** world. However, some authors suggested that 95\% might not be the most appropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn [@kruschke2014doing]. The proposition was to use 90\% instead of 95\%.

However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary thresholds in the first place, why not use 89\%? Moreover, 89 is the highest **prime number** that does not exceed the already unstable 95\% threshold. What does it have to do with anything? *Nothing*, but it reminds us of the total arbitrariness of these conventions [@mcelreath2018statistical].

Thus, CIs computed with 89\% intervals (`ci = 0.89`) are deemed to be more stable than, for instance, 95\% intervals [@kruschke2014doing]. An effective sample size (ESS; see [here](https://easystats.github.io/bayestestR/reference/diagnostic_posterior.html)) of at least 10,000 is recommended if one wants to compute precise 95\% intervals (Kruschke, 2014, p. 183ff). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., `rstanarm` or `brms`) is only 4,000 (thus, you might want to increase it when fitting your model).

However, 95\% has some [**advantages too**](https://easystats.github.io/blog/posts/bayestestr_95/). For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation, and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering 0 is larger for the 95\% CI than for lower ranges such as 89\%), which is a good thing in the context of the reproducibility crisis.

To add to the mess, some other software uses different defaults, such as, for instance, 90\%.

Ultimately, **you as the user should make an informed decision**, based on your needs and goals, and justify your choice.

# Different types of CIs

The reader might notice that `bayestestR` provides **two methods** to compute credible intervals, the **Highest Density Interval (HDI)** (`hdi()`) and the **Equal-tailed Interval (ETI)** (`eti()`). These methods can also be changed via the `method` argument of the `ci()` function. What is the difference? Let's see:

```{r warning=FALSE, message=FALSE}
library(bayestestR)
library(dplyr)
library(ggplot2)

# Generate a normal distribution
posterior <- distribution_normal(1000)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>% 
  estimate_density(extend=TRUE) %>% 
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # Quantile in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)
```

> **These are exactly the same...**

But is it also the case for other types of distributions?
```{r warning=FALSE, message=FALSE}
# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)

# Compute HDI and Quantile CI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>% 
  estimate_density(extend = TRUE) %>% 
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)
```

> **The difference is strong with this one.**

Contrary to the **HDI**, for which all points within the interval have a higher probability density than points outside the interval, the **ETI** is **equal-tailed**. This means that a 90\% interval has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile.

In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution.

On the other hand, the ETI range does *not* change when transformations are applied to the distribution (for instance, for the log-odds to probabilities transformation): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. Thus, for instance, if exponentiated credible intervals are required, it is recommended to calculate the ETI.

# The Support Interval

Unlike the HDI and the ETI, which look at the posterior distribution, the **Support Interval (SI)** provides information regarding the change in the credibility of values from the prior to the posterior - in other words, it indicates which values of a parameter have gained support from the observed data by some factor greater than or equal to *k* [@wagenmakers2018SI].

```{r warning=FALSE, message=FALSE}
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)

si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)

ggplot(mapping = aes(x = x, y = y)) +
  theme_classic() +
  # The posterior
  geom_area(fill = "orange",
            data = estimate_density(posterior, extend = TRUE)) +
  # The prior
  geom_area(color = "black", fill = NA, size = 1, linetype = "dashed",
            data = estimate_density(prior, extend = TRUE)) +
  # BF = 1 SI in blue
  geom_vline(xintercept = si_1$CI_low, color = "royalblue", size = 1) +
  geom_vline(xintercept = si_1$CI_high, color = "royalblue", size = 1) +
  # BF = 3 SI in red
  geom_vline(xintercept = si_3$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = si_3$CI_high, color = "red", size = 1)
```

Between the blue lines are values that received *some* support by the data (this is a $BF = 1~SI$), while between the red lines are values that received at least *moderate* support ($BF = 3~SI$) by the data.
From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the Support Interval will yield a Bayes factor smaller than 1/`BF`.

# References
bayestestR/inst/doc/mediation.R0000644000175000017500000002462114135670703016362 0ustar nileshnilesh## ---- SETTINGS-knitr, echo = FALSE, warning = FALSE, message = FALSE---------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  dev = "png",
  fig.width = 7,
  fig.height = 5,
  message = FALSE,
  warning = FALSE
)
options(width = 800)
if (!requireNamespace("mediation", quietly = TRUE) ||
    !requireNamespace("httr", quietly = TRUE) ||
    !requireNamespace("lavaan", quietly = TRUE) ||
    !requireNamespace("brms", quietly = TRUE) ||
    !requireNamespace("rstanarm", quietly = TRUE) ||
    !requireNamespace("insight", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

## -----------------------------------------------------------------------------
library(bayestestR)
library(mediation)
library(brms)
library(rstanarm)

# load sample data
data(jobs)
set.seed(123)

# linear models, for mediation analysis
b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs)
b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs)

# mediation analysis, for comparison with brms
m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek")

## ----eval=FALSE----------------------------------------------------------------
# # Fit Bayesian mediation model in brms
# f1 <- bf(job_seek ~ treat + econ_hard + sex + age)
# f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age)
# m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4)

## ----echo=FALSE----------------------------------------------------------------
m2 <- insight::download_model("brms_mv_6")

## ----eval=FALSE----------------------------------------------------------------
# # Fit Bayesian mediation model in rstanarm
# m3 <- stan_mvmer(
#   list(job_seek ~ treat + econ_hard + sex + age + (1 | occp),
#        depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp)),
#   data = jobs,
#   cores = 4,
#   refresh = 0
# )

## ----echo=FALSE----------------------------------------------------------------
m3 <- insight::download_model("stanmvreg_2")

## ---- message=TRUE--------------------------------------------------------------
# for brms
mediation(m2)

# for rstanarm
mediation(m3)

## -----------------------------------------------------------------------------
summary(m1)
mediation(m2, ci = .95)
mediation(m3, ci = .95)

## ---- message=TRUE--------------------------------------------------------------
m <- mediation(m2, centrality = "mean", ci = .95)
print(m, digits = 4)

## -----------------------------------------------------------------------------
library(lavaan)
data(jobs)
set.seed(1234)

model <- ' # direct effects
             depress2 ~ c1*treat + c2*econ_hard + c3*sex + c4*age + b*job_seek

           # mediation
             job_seek ~ a1*treat + a2*econ_hard + a3*sex + a4*age

           # indirect effects (a*b)
             indirect_treat := a1*b
             indirect_econ_hard := a2*b
             indirect_sex := a3*b
             indirect_age := a4*b

           # total effects
             total_treat := c1 + (a1*b)
             total_econ_hard := c2 + (a2*b)
             total_sex := c3 + (a3*b)
             total_age := c4 + (a4*b)
         '
m4 <- sem(model, data = jobs)
summary(m4)

# just to have the numbers right at hand and you don't need to scroll up
mediation(m2, ci = .95)
bayestestR/inst/doc/region_of_practical_equivalence.html0000644000175000017500000022027514135671001023521 0ustar nileshnilesh Region of Practical Equivalence (ROPE)

Region of Practical Equivalence (ROPE)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

What is the ROPE?

Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against “zero.” Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that the probability of being outside a specific range that can be considered as “practically no effect” (i.e., a negligible magnitude) is sufficient. This range is called the region of practical equivalence (ROPE).

Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying the ROPE is to let the user define an area around the null value enclosing values that are equivalent to the null value for practical purposes (J. Kruschke, 2014; J. K. Kruschke, 2010; J. K. Kruschke, Aguinis, & Joo, 2012).

Equivalence Test

The ROPE, being a region corresponding to a “null” hypothesis, is used for the equivalence test, to test whether a parameter is significant (in the sense of important enough to be cared about). This test is usually based on the “HDI+ROPE decision rule” (J. Kruschke, 2014; J. K. Kruschke & Liddell, 2018) to check whether parameter values should be accepted or rejected against an explicitly formulated “null hypothesis” (i.e., a ROPE). In other words, it checks the percentage of the Credible Interval (CI) that lies within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted.
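
As a minimal sketch (assuming the rstanarm model also used further below; any Bayesian model supported by bayestestR would work), this decision rule is available via equivalence_test():

library(rstanarm)
library(bayestestR)

model <- stan_glm(Sepal.Length ~ Sepal.Width, data = iris, refresh = 0)

# HDI+ROPE decision rule: "Rejected", "Accepted" or "Undecided" for each parameter
equivalence_test(model)

# The ROPE bounds and the CI level can also be set explicitly
equivalence_test(model, range = c(-0.1, 0.1), ci = 0.89)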

Credible interval in ROPE vs full posterior in ROPE

Using the ROPE and the HDI as the Credible Interval, Kruschke (2018) suggests using the percentage of the 95% HDI that falls within the ROPE as a decision rule. However, as the 89% HDI is considered a better choice (J. Kruschke, 2014; R. McElreath, 2014; Richard McElreath, 2018), bayestestR provides by default the percentage of the 89% HDI that falls within the ROPE.

However, data from simulation studies suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). Thus, we recommend that the user considers using the full ROPE percentage (by setting ci = 1), which will return the portion of the entire posterior distribution in the ROPE.
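
For instance (a sketch, reusing the model from above):

rope(model)          # percentage of the 89% HDI inside the ROPE (the default)
rope(model, ci = 1)  # percentage of the full posterior inside the ROPE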

What percentage in ROPE to accept or to reject?

If the HDI is completely outside the ROPE, the “null hypothesis” for this parameter is “rejected.” If the ROPE completely covers the HDI, i.e., all of the most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s unclear whether the null hypothesis should be accepted or rejected.

If the full ROPE is used (i.e., 100% of the HDI), then the null hypothesis is rejected or accepted when the percentage of the posterior within the ROPE is smaller than 2.5% or greater than 97.5%, respectively. Desirable results are low proportions inside the ROPE (the closer to zero the better).

How to define the ROPE range?

Kruschke (2018) suggests that the ROPE could be set, by default, to a range from -0.1 to 0.1 of a standardized parameter (negligible effect size according to Cohen, 1988).

  • For linear models (lm), this can be generalised to: [-0.1 * SD(y), 0.1 * SD(y)].
  • For logistic models, the parameters expressed in log odds ratio can be converted to standardized differences by dividing by π/√3 (see the effectsize package), resulting in a range of -0.18 to 0.18. For other models with binary outcomes, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models.
  • For t-tests, the standard deviation of the response is used, similarly to linear models (see above).
  • For correlations, [-0.05, 0.05] is used, i.e., half the value of a negligible correlation as suggested by Cohen’s (1988) rules of thumb.
  • For all other models, [-0.1, 0.1] is used to determine the ROPE limits, but it is strongly advised to specify it manually (see the sketch below).
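
As a sketch of how to retrieve these defaults for a fitted model (reusing the model from above), rope_range() returns the limits that rope() would use:

rope_range(model)  # default ROPE limits for this model

# which, for a linear model, amounts to +/- 0.1 * SD of the response:
c(-0.1, 0.1) * sd(iris$Sepal.Length)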

Sensitivity to parameter’s scale

It is important to consider the unit (i.e., the scale) of the predictors when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the pd), the percentage in ROPE depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response’s scale, its proximity with a coefficient depends on the scale of the coefficient itself.

For instance, if we consider a simple regression growth ~ time, modelling the development of Wookie babies, a negligible change (the ROPE) is less than 54 cm. If our time variable is expressed in days, we will find that the coefficient (representing the growth by day) is about 10 cm (the median of the posterior of the coefficient is 10), which we would consider negligible. However, if we decide to express the time variable in years, the coefficient will be scaled by this transformation (as it will now represent the growth by year). The coefficient will now be around 3550 cm (10 * 355), which we would now consider significant.

library(rstanarm)
library(bayestestR)
library(see)

data <- iris  # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data)  # Fit model
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci=1)

# Visualise the pd
plot(pd)

pd
> Probability of Direction
> 
> Parameter   |     pd
> --------------------
> (Intercept) |   100%
> Sepal.Width | 91.65%
# Visualise the percentage in ROPE
plot(percentage_in_rope)

percentage_in_rope
> # Proportion of samples inside the ROPE [-0.08, 0.08]:
> 
> Parameter   | inside ROPE
> -------------------------
> (Intercept) |      0.00 %
> Sepal.Width |     16.28 %

We can see that the pd and the percentage in ROPE of the linear relationship between Sepal.Length and Sepal.Width are respectively about 91.65% and 16.28%, corresponding to an uncertain and not significant effect. What happens if we scale our predictor?

data$Sepal.Width_scaled <- data$Sepal.Width / 100  # Divide predictor by 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data)  # Fit model
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci=1)

# Visualise the pd
plot(pd)

pd
> Probability of Direction
> 
> Parameter          |     pd
> ---------------------------
> (Intercept)        |   100%
> Sepal.Width_scaled | 91.65%
# Visualise the percentage in ROPE
plot(percentage_in_rope)

percentage_in_rope
> # Proportion of samples inside the ROPE [-0.08, 0.08]:
> 
> Parameter          | inside ROPE
> --------------------------------
> (Intercept)        |      0.00 %
> Sepal.Width_scaled |      0.10 %

As you can see, by simply dividing the predictor by 100, we drastically changed the conclusion related to the percentage in ROPE (which became very close to 0): the effect could now be interpreted as being significant. Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (e.g., what coefficient would correspond to a small effect?), and when reporting or reading ROPE results.

Multicollinearity: Non-independent covariates

When parameters show strong correlations, i.e., when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the ROPE percentage of these parameters’ (joint) distributions may be either increased or decreased, which means that inferences based on the ROPE are inappropriate (J. Kruschke, 2014).

The equivalence_test() and rope() functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (Piironen & Vehtari, 2017).
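
A rough first check (a sketch; a more thorough assessment would inspect pair plots of the joint draws) is to correlate the posterior draws of the parameters:

# pairwise correlations between the posterior draws of the parameters
posteriors <- insight::get_parameters(model)
round(cor(posteriors), 2)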

Kruschke, J. (2014). Doing bayesian data analysis: A tutorial with r, JAGS, and stan. Academic Press.
Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.
Kruschke, J. K., Aguinis, H., & Joo, H. (2012). The time has come: Bayesian methods for data analysis in the organizational sciences. Organizational Research Methods, 15(4), 722–752.
Kruschke, J. K., & Liddell, T. M. (2018). The bayesian new statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a bayesian perspective. Psychonomic Bulletin & Review, 25(1), 178–206.
McElreath, R. (2014). Rethinking: Statistical rethinking book package. R package version 1.391.
McElreath, Richard. (2018). Statistical rethinking: A bayesian course with examples in r and stan. Chapman; Hall/CRC.
Piironen, J., & Vehtari, A. (2017). Comparison of bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735.
bayestestR/inst/doc/guidelines.Rmd0000644000175000017500000002046714023535711017061 0ustar nileshnilesh---
title: "Reporting Guidelines"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  %\VignetteIndexEntry{Reporting Guidelines}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment = ">")
options(digits = 2)
```

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

# Reporting Guidelines

## How to describe and report the parameters of a model

A Bayesian analysis returns a posterior distribution for each parameter (or *effect*). To minimally describe these distributions, we recommend reporting a point-estimate of [centrality](https://en.wikipedia.org/wiki/Central_tendency) as well as information characterizing the estimation uncertainty (the [dispersion](https://en.wikipedia.org/wiki/Statistical_dispersion)). Additionally, one can also report indices of effect existence and/or significance.

Based on the previous [**comparison of point-estimates**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) and [**indices of effect existence**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we can draw the following recommendations.

### **Centrality**

We suggest reporting the [**median**](https://easystats.github.io/bayestestR/reference/point_estimate.html) as an index of centrality, as it is more robust than the [mean](https://easystats.github.io/bayestestR/reference/point_estimate.html) or the [MAP estimate](https://easystats.github.io/bayestestR/reference/map_estimate.html). However, in the case of a severely skewed posterior distribution, the MAP estimate could be a good alternative.

### **Uncertainty**

The [**95\% or 89\% Credible Intervals (CI)**](https://easystats.github.io/bayestestR/articles/credible_interval.html) are two reasonable ranges to characterize the uncertainty related to the estimation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html) for a discussion about the differences between these two values). We also recommend computing the CIs based on the [HDI](https://easystats.github.io/bayestestR/reference/hdi.html) rather than on [quantiles](https://easystats.github.io/bayestestR/reference/ci.html), favouring probable over central values. Note that a CI based on quantiles (an equal-tailed interval) might be more appropriate in case of transformations (for instance, when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis)).
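
As an illustration of this last point, here is a minimal sketch (using simulated draws on the log-odds scale; the variable names are ours), showing that ETI bounds map cleanly through a transformation while HDI bounds do not:

```{r eval=FALSE}
library(bayestestR)

set.seed(123)
log_odds <- rnorm(10000, mean = 1, sd = 1) # simulated posterior on the log-odds scale

# Transforming the ETI bounds recovers the ETI of the transformed draws...
plogis(c(eti(log_odds)$CI_low, eti(log_odds)$CI_high))
eti(plogis(log_odds))

# ...whereas the transformed HDI bounds differ from the HDI of the transformed draws
plogis(c(hdi(log_odds)$CI_low, hdi(log_odds)$CI_high))
hdi(plogis(log_odds))
```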
### **Existence**

```{r echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg")
```

The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect *existence* and *significance*. The most straightforward index to describe the existence of an effect is the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html), representing the certainty associated with the most probable direction (positive or negative) of the effect.

This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics, and independent from the scale of the data. Moreover, it is strongly correlated with the frequentist *p*-value, and can thus be used to draw parallels and give some reference to readers unfamiliar with Bayesian statistics. Two-sided *p*-values of respectively `.1`, `.05`, `.01` and `.001` correspond approximately to a *pd* of 95\%, 97.5\%, 99.5\% and 99.95\%. Thus, for convenience, we suggest the following reference values as interpretation helpers:

- *pd* **\<= 95\%** ~ *p* \> .1: uncertain
- *pd* **\> 95\%** ~ *p* \< .1: possibly existing
- *pd* **\> 97\%**: likely existing
- *pd* **\> 99\%**: probably existing
- *pd* **\> 99.9\%**: certainly existing

### **Significance**

The percentage in **ROPE** is an index of **significance** (in its primary meaning), informing us whether a parameter is related or not to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the **percentage of the full posterior distribution** (the *full* ROPE) instead of a given proportion of the CI in the ROPE, which appears to be more sensitive (especially to delineate highly significant effects). Rather than using it as a binary, all-or-nothing decision criterion, as suggested by the original [equivalence test](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#equivalence-test), we recommend using the percentage as a *continuous* index of significance. However, based on [simulation data](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we suggest the following reference values as interpretation helpers:

- **\> 99\%** in ROPE: negligible (we can accept the null hypothesis)
- **\> 97.5\%** in ROPE: probably negligible
- **\<= 97.5\%** \& **\>= 2.5\%** in ROPE: undecided significance
- **\< 2.5\%** in ROPE: probably significant
- **\< 1\%** in ROPE: significant (we can reject the null hypothesis)

*Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see [here](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#sensitivity-to-parameters-scale)).*

### **Template Sentence**

Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be:

> "the effect of *X* has a probability of ***pd*** of being *negative* (Median = ***median***, 89\% CI [***HDIlow***, ***HDIhigh***]) and can be considered as *significant* (***ROPE***\% in ROPE)."
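
In practice, all the ingredients of this template sentence can be obtained in a single call (a sketch; `model` stands for any supported Bayesian model):

```{r eval=FALSE}
library(bayestestR)

describe_posterior(
  model,
  centrality = "median",           # median of the posterior
  ci = 0.89, ci_method = "hdi",    # 89% HDI
  test = c("p_direction", "rope"), # pd and percentage in ROPE
  rope_ci = 1                      # full posterior in the ROPE
)
```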
## How to compare different models

Although it can also be used to assess effect existence and significance, the **Bayes factor (BF)** is a versatile index that can be used to directly compare different models (or data generation processes). The [Bayes factor](https://easystats.github.io/bayestestR/articles/bayes_factors.html) is a ratio that informs us by how much more (or less) likely the observed data are under two compared models - usually a model *with* versus a model *without* the effect. Depending on the specification of the null model (whether it is a point-estimate (e.g., **0**) or an interval), the Bayes factor can be used both in the context of effect existence and significance.

In general, a Bayes factor greater than 1 is taken as evidence in favour of one of the models (the one in the numerator), and a Bayes factor smaller than 1 is taken as evidence in favour of the other model (the one in the denominator). Several rules of thumb exist to help with the interpretation (see [here](https://easystats.github.io/effectsize/reference/interpret_bf.html)), with **\> 3** being one common threshold to categorize non-anecdotal evidence.

### **Template Sentence**

When reporting Bayes factors (BF), one can use the following sentence:

> "There is *moderate evidence* in favour of an *absence* of effect of *x* (BF = *BF*)."

# Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.
bayestestR/inst/doc/indicesExistenceComparison.Rmd0000644000175000017500000000367014054321351022244 0ustar nileshnilesh---
title: "In-Depth 2: Comparison of Indices of Effect Existence and Significance"
output:
  rmarkdown::html_vignette:
    toc: false
    toc_depth: 3
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{In-Depth 2: Comparison of Indices of Effect Existence and Significance}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment = ">", dpi = 75, out.width = "100%")
options(digits = 2)
```

This vignette can be referred to by citing the following:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

- Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767)

---

# Indices of Effect *Existence* and *Significance* in the Bayesian Framework

A comparison of different Bayesian indices (*pd*, *BFs*, ROPE etc.) is accessible [**here**](https://doi.org/10.3389/fpsyg.2019.02767). But, in case you don't wish to read the full article, the following table summarizes the key takeaways!
```{r, echo=FALSE}
knitr::include_graphics("https://www.frontiersin.org/files/Articles/498833/fpsyg-10-02767-HTML/image_m/fpsyg-10-02767-t003.jpg")
```

# Suggestions

If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.
bayestestR/inst/doc/example3.html0000644000175000017500000106541514135670667016702 0ustar nileshnilesh 3. Become a Bayesian master

3. Become a Bayesian master

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541


Yoda Bayes (896 BBY - 4 ABY).

Mixed Models

TO BE CONTINUED.

Priors

TO BE CONTINUED.

What’s next?

The journey to become a true Bayesian master is not yet over. It is merely the beginning. It is now time to leave the bayestestR universe and apply the Bayesian framework in a variety of other statistical contexts:

bayestestR/inst/doc/credible_interval.R0000644000175000017500000000564114135670657020077 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment = ">")
options(digits = 2)

if (!requireNamespace("ggplot2", quietly = TRUE) ||
    !requireNamespace("logspline", quietly = TRUE) ||
    !requireNamespace("dplyr", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

set.seed(333)

## ----warning=FALSE, message=FALSE----------------------------------------------
library(bayestestR)
library(dplyr)
library(ggplot2)

# Generate a normal distribution
posterior <- distribution_normal(1000)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>%
  estimate_density(extend = TRUE) %>%
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)

## ----warning=FALSE, message=FALSE----------------------------------------------
# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)

# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")

# Plot the distribution and add the limits of the two CIs
posterior %>%
  estimate_density(extend = TRUE) %>%
  ggplot(aes(x = x, y = y)) +
  geom_area(fill = "orange") +
  theme_classic() +
  # HDI in blue
  geom_vline(xintercept = ci_hdi$CI_low, color = "royalblue", size = 3) +
  geom_vline(xintercept = ci_hdi$CI_high, color = "royalblue", size = 3) +
  # ETI in red
  geom_vline(xintercept = ci_eti$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = ci_eti$CI_high, color = "red", size = 1)

## ----warning=FALSE, message=FALSE----------------------------------------------
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)

si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)

ggplot(mapping = aes(x = x, y = y)) +
  theme_classic() +
  # The posterior
  geom_area(fill = "orange",
            data = estimate_density(posterior, extend = TRUE)) +
  # The prior
  geom_area(color = "black", fill = NA, size = 1, linetype = "dashed",
            data = estimate_density(prior, extend = TRUE)) +
  # BF = 1 SI in blue
  geom_vline(xintercept = si_1$CI_low, color = "royalblue", size = 1) +
  geom_vline(xintercept = si_1$CI_high, color = "royalblue", size = 1) +
  # BF = 3 SI in red
  geom_vline(xintercept = si_3$CI_low, color = "red", size = 1) +
  geom_vline(xintercept = si_3$CI_high, color = "red", size = 1)
bayestestR/inst/doc/region_of_practical_equivalence.Rmd0000644000175000017500000002421014023526535023275 0ustar nileshnilesh---
title: "Region of Practical Equivalence (ROPE)"
output:
  rmarkdown::html_vignette:
    toc: true
    fig_width: 10.08
    fig_height: 6
tags: [r, bayesian, posterior, test, rope, equivalence test]
vignette: >
  \usepackage[utf8]{inputenc}
  %\VignetteIndexEntry{Region of Practical Equivalence (ROPE)}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
bibliography: bibliography.bib
csl: apa.csl
---

This vignette can be referred to by citing the package:

- Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019).
*bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

---

```{r message=FALSE, warning=FALSE, include=FALSE}
if (!requireNamespace("rstanarm", quietly = TRUE) ||
    !requireNamespace("see", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment = ">")
options(digits = 2)
set.seed(333)
```

# What is the *ROPE?*

Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against "zero". Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that the probability of being outside a specific range that can be considered as **"practically no effect"** (*i.e.*, a negligible magnitude) is sufficient. This range is called the **region of practical equivalence (ROPE)**.

Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying the ROPE is to let the user define an area around the null value enclosing values that are **equivalent to the null** value for practical purposes [@kruschke2010believe; @kruschke2012time; @kruschke2014doing].

# Equivalence Test

The ROPE, being a region corresponding to a "null" hypothesis, is used for the **equivalence test**, to test whether a parameter is **significant** (in the sense of *important* enough to be cared about). This test is usually based on the **"HDI+ROPE decision rule"** [@kruschke2014doing; @kruschke2018bayesian] to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (*i.e.*, a ROPE). In other words, it checks the percentage of the Credible Interval (CI) that lies within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted.

# Credible interval in ROPE *vs* full posterior in ROPE

Using the ROPE and the HDI as the Credible Interval, Kruschke (2018) suggests using the percentage of the 95\% HDI that falls within the ROPE as a decision rule. However, as the 89\% HDI [is considered a better choice](https://easystats.github.io/bayestestR/articles/credible_interval.html) [@kruschke2014doing; @mcelreath2014rethinking; @mcelreath2018statistical], `bayestestR` provides by default the percentage of the 89\% HDI that falls within the ROPE.

However, [*data from simulation studies*](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). Thus, we recommend that the user considers using the **full ROPE** percentage (by setting `ci = 1`), which will return the portion of the entire posterior distribution in the ROPE.

# What percentage in ROPE to accept or to reject?

If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, *i.e.*, all of the most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted.
Else, it's unclear whether the null hypothesis should be accepted or rejected.

If the **full ROPE** is used (*i.e.*, 100\% of the HDI), then the null hypothesis is rejected or accepted when the percentage of the posterior within the ROPE is smaller than 2.5\% or greater than 97.5\%, respectively. Desirable results are low proportions inside the ROPE (the closer to zero the better).

# How to define the ROPE range?

Kruschke (2018) suggests that the ROPE could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (a negligible effect size according to Cohen, 1988).

- For **linear models (lm)**, this can be generalised to: $$[-0.1*SD_{y}, 0.1*SD_{y}]$$
- For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized differences by dividing by $$\pi/\sqrt{3}$$ (see [the **effectsize** package](https://easystats.github.io/effectsize/articles/convert.html#from-odds-ratios)), resulting in a range of `-0.18` to `0.18`. For other models with binary outcomes, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models.
- For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above).
- For **correlations**, `-0.05, 0.05` is used, *i.e.*, half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb.
- For all other models, `-0.1, 0.1` is used to determine the ROPE limits, but it is strongly advised to specify it manually.

# Sensitivity to parameter's scale

It is important to consider **the unit (*i.e.*, the scale) of the predictors** when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the [`pd`](https://easystats.github.io/bayestestR/articles/probability_of_direction.html)), the percentage in **ROPE** depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself.

For instance, if we consider a simple regression `growth ~ time`, modelling the development of **Wookie babies**, a negligible change (the ROPE) is less than **54 cm**. If our `time` variable is **expressed in days**, we will find that the coefficient (representing the growth **by day**) is about **10 cm** (*the median of the posterior of the coefficient is 10*), which we would consider **negligible**. However, if we decide to express the `time` variable **in years**, the coefficient will be scaled by this transformation (as it will now represent the growth **by year**). The coefficient will now be around **3550** cm (`10 * 355`), which we would now consider **significant**.
```{r message=FALSE, warning=FALSE, eval=FALSE}
library(rstanarm)
library(bayestestR)
library(see)

data <- iris # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data = data) # Fit model
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"}
library(rstanarm)
library(bayestestR)
library(see)

set.seed(333)
data <- iris # Use the iris data
model <- stan_glm(Sepal.Length ~ Sepal.Width, data = data, refresh = 0)
```

```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"}
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci = 1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope
```

We can see that the *pd* and the percentage in ROPE of the linear relationship between **Sepal.Length** and **Sepal.Width** are respectively about `91.65%` and `16.28%`, corresponding to an **uncertain** and **not significant** effect. What happens if we scale our predictor?

```{r message=FALSE, warning=FALSE, eval=FALSE}
data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data = data) # Fit model
```

```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"}
set.seed(333)
data$Sepal.Width_scaled <- data$Sepal.Width / 100
model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data = data, refresh = 0)
```

```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"}
# Compute indices
pd <- p_direction(model)
percentage_in_rope <- rope(model, ci = 1)

# Visualise the pd
plot(pd)
pd

# Visualise the percentage in ROPE
plot(percentage_in_rope)
percentage_in_rope
```

As you can see, by simply dividing the predictor by 100, we **drastically** changed the conclusion related to the **percentage in ROPE** (which became very close to `0`): the effect could now be **interpreted as being significant**. Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (*e.g.*, what coefficient would correspond to a small effect?), and when reporting or reading ROPE results.

# Multicollinearity: Non-independent covariates

When **parameters show strong correlations**, *i.e.*, when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the ROPE percentage of these parameters' (joint) distributions may be either increased or decreased, which means that inferences based on the ROPE are inappropriate [@kruschke2014doing].

The `equivalence_test()` and `rope()` functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection [@piironen2017comparison].
bayestestR/inst/doc/probability_of_direction.html0000644000175000017500000061163514135670777022226 0ustar nileshnilesh Probability of Direction (pd)

Probability of Direction (pd)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

What is the pd?

The Probability of Direction (pd) is an index of effect existence, ranging from 50% to 100%, representing the certainty with which an effect goes in a particular direction (i.e., is positive or negative).

Beyond its simplicity of interpretation, understanding and computation, this index also presents other interesting properties:

  • It is independent from the model: It is solely based on the posterior distributions and does not require any additional information from the data or the model.
  • It is robust to the scale of both the response variable and the predictors.
  • It is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics.

However, this index is not relevant to assess the magnitude and importance of an effect (the meaning of “significance”), which is better achieved through other indices such as the ROPE percentage. In fact, indices of significance and existence are totally independent. You can have an effect with a pd of 99.99%, for which the whole posterior distribution is concentrated within the [0.0001, 0.0002] range. In this case, the effect is positive with a high certainty, but also not significant (i.e., very small).

Indices of effect existence, such as the pd, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect’s direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance.
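
To make this distinction concrete, here is a quick sketch with simulated draws (the numbers are chosen to mimic the example above):

library(bayestestR)

x <- rnorm(10000, mean = 0.00015, sd = 0.00003)  # a tiny but strictly positive effect
p_direction(x)                         # pd close to 100%: the effect almost certainly exists...
rope(x, range = c(-0.1, 0.1), ci = 1)  # ...but ~100% in ROPE: it is negligible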

Relationship with the p-value

In most cases, it seems that the pd has a direct correspondence with the frequentist one-sided p-value through the formula: p(one-sided) = 1 - pd. Similarly, the two-sided p-value (the most commonly reported one) is equivalent through the formula: p(two-sided) = 2 * (1 - pd). Thus, a two-sided p-value of respectively .1, .05, .01 and .001 would correspond approximately to a pd of 95%, 97.5%, 99.5% and 99.95%.
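
These conversions are implemented as helper functions in bayestestR (pd_to_p() and its inverse p_to_pd(); a sketch, with the pd expressed as a proportion):

library(bayestestR)

pd_to_p(0.975)                           # two-sided p-value, here .05
pd_to_p(0.975, direction = "one-sided")  # one-sided, here .025

# or, by hand:
2 * (1 - 0.975)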


Correlation between the frequentist p-value and the probability of direction (pd)

But if it’s like the p-value, it must be bad because the p-value is bad [insert reference to the reproducibility crisis].

In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the p-value is intrinsically bad or wrong. Instead, it is its misuse, misunderstanding and misinterpretation that fuel the decay of the situation. For instance, the fact that the pd is highly correlated with the p-value suggests that the latter is more an index of effect existence than of significance (i.e., “worth of interest”). The Bayesian version, the pd, has an intuitive meaning and makes obvious the fact that all thresholds are arbitrary. Additionally, the mathematical and interpretative transparency of the pd, and its reconceptualisation as an index of effect existence, offers a valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity with the frequentist p-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework.

Methods of computation

The simplest and most direct way to compute the pd is to 1) look at the median’s sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This “direct” method is the most straightforward, but its precision is directly tied to the number of posterior draws.
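
In code, the direct method boils down to a couple of lines (a sketch on simulated draws; compare with pd()):

x <- rnorm(4000, mean = 0.4, sd = 0.2)  # hypothetical posterior draws

# 1) sign of the median, 2) portion of the posterior with the same sign, 3) percentage
max(mean(x > 0), mean(x < 0))  # should closely match pd(x)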

The second approach relies on density estimation. It starts by estimating the density function (for which many methods are available), and then computing the area under the curve (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered as more precise, but strongly depends on the method used to estimate the density function.

Methods comparison

Let’s compare the 4 available methods, the direct method and 3 density-based methods differing by their density estimation algorithm (see estimate_density).

Correlation

Let’s start by testing the proximity and similarity of the results obtained by different methods.

library(bayestestR)
library(logspline)
library(KernSmooth)

# Compute the correlations
data <- data.frame()
for (the_mean in runif(25, 0, 4)) {
  for (the_sd in runif(25, 0.5, 4)) {
    x <- rnorm(100, the_mean, abs(the_sd))
    data <- rbind(
      data,
      data.frame(
        "direct" = pd(x),
        "kernel" = pd(x, method = "kernel"),
        "logspline" = pd(x, method = "logspline"),
        "KernSmooth" = pd(x, method = "KernSmooth")
      )
    )
  }
}
data <- as.data.frame(sapply(data, as.numeric))

# Visualize the correlations
library(ggplot2)
library(GGally)

GGally::ggpairs(data) +
  theme_classic()

All methods are highly correlated and give very similar results. That means that the method choice is not a drastic game changer and cannot be used to tweak the results too much.

Accuracy

To test the accuracy of each method, we will start by computing the direct pd from a very dense distribution (with a large number of observations). This will be our baseline, or “true” pd. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the pd with different methods. The closer this estimate is to the reference one, the better.

data <- data.frame()
for (i in 1:25) {
  the_mean <- runif(1, 0, 4)
  the_sd <- abs(runif(1, 0.5, 4))
  parent_distribution <- rnorm(100000, the_mean, the_sd)
  true_pd <- pd(parent_distribution)

  for (j in 1:25) {
    sample_size <- round(runif(1, 25, 5000))
    subsample <- sample(parent_distribution, sample_size)
    data <- rbind(
      data,
      data.frame(
        "sample_size" = sample_size,
        "true" = true_pd,
        "direct" = pd(subsample) - true_pd,
        "kernel" = pd(subsample, method = "kernel") - true_pd,
        "logspline" = pd(subsample, method = "logspline") - true_pd,
        "KernSmooth" = pd(subsample, method = "KernSmooth") - true_pd
      )
    )
  }
}
data <- as.data.frame(sapply(data, as.numeric))
library(tidyr)
library(dplyr)

data %>%
  tidyr::gather(Method, Distance, -sample_size, -true) %>%
  ggplot(aes(x = sample_size, y = Distance, color = Method, fill = Method)) +
  geom_point(alpha = 0.3, stroke = 0, shape = 16) +
  geom_smooth(alpha = 0.2) +
  geom_hline(yintercept = 0) +
  theme_classic() +
  xlab("\nDistribution Size")

The “Kernel”-based density methods seem to consistently underestimate the pd. Interestingly, the “direct” method appears to be the most reliable, even in the case of a small number of posterior draws.

Can the pd be 100%?

p = 0.000 is coined as one of the terms to avoid when reporting results (Lilienfeld et al., 2015), even though it is often displayed by statistical software. The rationale is that for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the p = 0.000 returned by software is due to approximations related, among other things, to finite memory hardware.

One could apply this rationale to the pd: since all data points have a non-null probability density, the pd (a particular portion of the probability density) can never be 100%. While this is an entirely valid point, people using the direct method might argue that their pd is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which pd = 100% is a valid statement.

Lilienfeld, S. O., Sauvigné, K. C., Lynn, S. J., Cautin, R. L., Latzman, R. D., & Waldman, I. D. (2015). Fifty psychological and psychiatric terms to avoid: A list of inaccurate, misleading, misused, ambiguous, and logically confused words and phrases. Frontiers in Psychology, 6, 1100. https://doi.org/10.3389/fpsyg.2015.01100
bayestestR/inst/doc/example1.html0000644000175000017500000131122714135670663016677 0ustar nileshnilesh 1. Initiation to Bayesian models

1. Initiation to Bayesian models

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Now that you’ve read the Get started section, let’s dive in the subtleties of Bayesian modelling using R.

Loading the packages

Once you’ve installed the necessary packages, we can load rstanarm (to fit the models), bayestestR (to compute useful indices), and insight (to access the parameters).

library(rstanarm)
library(bayestestR)
library(insight)

Simple linear (regression) model

We will begin by conducting a simple linear regression to test the relationship between Petal.Length (our predictor, or independent, variable) and Sepal.Length (our response, or dependent, variable) from the iris dataset which is included by default in R.

Fitting the model

Let’s start by fitting a frequentist version of the model, just to have a reference point:

model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)
> 
> Call:
> lm(formula = Sepal.Length ~ Petal.Length, data = iris)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -1.2468 -0.2966 -0.0152  0.2768  1.0027 
> 
> Coefficients:
>              Estimate Std. Error t value Pr(>|t|)    
> (Intercept)    4.3066     0.0784    54.9   <2e-16 ***
> Petal.Length   0.4089     0.0189    21.6   <2e-16 ***
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 0.41 on 148 degrees of freedom
> Multiple R-squared:  0.76,    Adjusted R-squared:  0.758 
> F-statistic:  469 on 1 and 148 DF,  p-value: <2e-16

We can also zoom in on the parameters of interest to us:

insight::get_parameters(model)
>      Parameter Estimate
> 1  (Intercept)     4.31
> 2 Petal.Length     0.41

In this model, the linear relationship between Petal.Length and Sepal.Length is positive and significant (\(\beta = 0.41, t(148) = 21.6, p < .001\)). This means that for each one-unit increase in Petal.Length (the predictor), you can expect Sepal.Length (the response) to increase by 0.41. This effect can be visualized by plotting the predictor values on the x axis and the response values as y using the ggplot2 package:

library(ggplot2) # Load the package

# The ggplot function takes the data as argument, and then the variables
# related to aesthetic features such as the x and y axes.
ggplot(iris, aes(x = Petal.Length, y = Sepal.Length)) +
  geom_point() + # This adds the points
  geom_smooth(method = "lm") # This adds a regression line

Now let’s fit a Bayesian version of the model by using the stan_glm function in the rstanarm package:

model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)

You can see the sampling algorithm being run.

Extracting the posterior

Once it is done, let us extract the parameters (i.e., coefficients) of the model.

posteriors <- insight::get_parameters(model)

head(posteriors) # Show the first 6 rows
>   (Intercept) Petal.Length
> 1         4.4         0.39
> 2         4.4         0.40
> 3         4.3         0.41
> 4         4.3         0.40
> 5         4.3         0.40
> 6         4.3         0.41

As we can see, the parameters take the form of a lengthy data frame with two columns, corresponding to the intercept and the effect of Petal.Length. These columns contain the posterior distributions of these two parameters. In simple terms, the posterior distribution is a set of different plausible values for each parameter. Contrast this with the results we saw from the frequentist linear regression model using lm, where the results had single values for each effect of the model, and not a distribution of values. This is one of the most important differences between these two frameworks.

About posterior draws

Let’s look at the length of the posteriors.

nrow(posteriors) # Size (number of rows)
> [1] 4000

Why is the size 4000, and not more or less?

First of all, these observations (the rows) are usually referred to as posterior draws. The underlying idea is that the Bayesian sampling algorithm (e.g., Markov Chain Monte Carlo, MCMC) will draw from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. Therefore, the more draws you have, the better your estimation of the posterior distribution. However, more draws also mean longer computation time.
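To build some intuition about this, here is a toy sketch (using simulated values rather than the model above) showing that an estimate computed from more draws is more stable:

set.seed(42)
parent <- rnorm(1e6, mean = 0.41, sd = 0.02) # stand-in for the "true" posterior

median(sample(parent, 50)) # few draws: a noisier estimate of the median
median(sample(parent, 10000)) # many draws: much closer to 0.41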

If we look at the documentation (?sampling) for rstanarm’s "sampling" algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are 4 chains (you can think of them as distinct sampling runs), each producing 2000 iterations (draws). However, only half of these iterations are kept, as the first half is used for warm-up (the convergence of the algorithm). Thus, the total number of posterior draws equals 4 chains * (2000 iterations - 1000 warm-up) = 4000.

We can change that, for instance:

model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, chains = 2, iter = 1000, warmup = 250)

nrow(insight::get_parameters(model)) # Size (number of rows)
> [1] 1500

In this case, as would be expected, we have 2 chains * (1000 iterations - 250 warm-up) = 1500 posterior draws. But let’s keep our first model with the default setup (as it has more draws).

Visualizing the posterior distribution

Now that we’ve understood where these values come from, let’s look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of Petal.Length.

ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange")

This distribution represents the probability (the y axis) of different effects (the x axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about 0.35 to 0.50, with the bulk of it being at around 0.41.

Congrats! You’ve just described your first posterior distribution.

And this is the heart of Bayesian analysis. We don’t need p-values, t-values, or degrees of freedom. Everything we need is contained within this posterior distribution.

Our description above is consistent with the values obtained from the frequentist regression (which resulted in a \(\beta\) of 0.41). This is reassuring! Indeed, in most cases, Bayesian analysis does not drastically differ from the frequentist results or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe.

We can now go ahead and precisely characterize this posterior distribution.

Describing the Posterior

Unfortunately, it is often not practical to report whole posterior distributions as graphs, so we need to find a concise way to summarize them. We recommend describing the posterior distribution with 3 elements:

  1. A point-estimate, which is a one-value summary (similar to the \(\beta\) in frequentist regressions).
  2. A credible interval representing the associated uncertainty.
  3. Some indices of significance, giving information about the relative importance of this effect.

Point-estimate

What single value can best represent my posterior distribution?

Centrality indices, such as the mean, the median, or the mode, are usually used as point-estimates. But what’s the difference between them?

Let’s answer this by first inspecting the mean:

mean(posteriors$Petal.Length)
> [1] 0.41

This is close to the frequentist \(\beta\). But, as we know, the mean is quite sensitive to outliers or extreme values. Maybe the median could be more robust?

median(posteriors$Petal.Length)
> [1] 0.41

Well, this is very close to the mean (and identical when rounding the values). Maybe we could take the mode, that is, the peak of the posterior distribution? In the Bayesian framework, this value is called the Maximum A Posteriori (MAP). Let’s see:

map_estimate(posteriors$Petal.Length)
> MAP Estimate: 0.41

They are all very close!
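Note that bayestestR can also return several centrality indices in one call via the point_estimate() function; a quick sketch on the same draws:

point_estimate(posteriors$Petal.Length, centrality = "all") # median, mean and MAP side by side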

Let’s visualize these values on the posterior distribution:

ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange") +
  # The mean in blue
  geom_vline(xintercept = mean(posteriors$Petal.Length), color = "blue", size = 1) +
  # The median in red
  geom_vline(xintercept = median(posteriors$Petal.Length), color = "red", size = 1) +
  # The MAP in purple
  geom_vline(xintercept = map_estimate(posteriors$Petal.Length), color = "purple", size = 1)

Well, all these values give very similar results. Thus, we will choose the median, as this value has a direct meaning from a probabilistic perspective: there is a 50% chance that the true effect is higher and a 50% chance that it is lower (as the median divides the distribution into two equal parts).
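We can verify this property directly on the draws (a one-line sketch): by definition, half of them lie above the median.

mean(posteriors$Petal.Length > median(posteriors$Petal.Length))
> [1] 0.5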

Uncertainty

Now that we have a point-estimate, we have to describe the uncertainty. We could compute the range:

range(posteriors$Petal.Length)
> [1] 0.33 0.48

But does it make sense to include all these extreme values? Probably not. Thus, we will compute a credible interval. Long story short, it’s kind of similar to a frequentist confidence interval, but easier to interpret and easier to compute — and it makes more sense.

We will compute this credible interval based on the Highest Density Interval (HDI). It will give us the range containing the 89% most probable effect values. Note that we will use 89% CIs instead of 95% CIs (as in the frequentist framework), as the 89% level gives more stable results (Kruschke, 2014) and reminds us about the arbitrariness of such conventions (McElreath, 2018).

hdi(posteriors$Petal.Length, ci = 0.89)
> 89% HDI: [0.38, 0.44]

Nice, so we can conclude that the effect has an 89% chance of falling within the [0.38, 0.44] range. We have just computed the two most important pieces of information for describing our effects.
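Note that the HDI is not the only kind of credible interval: bayestestR also provides the equal-tailed interval (ETI) via the eti() function, which is based on quantiles. For a roughly symmetric posterior such as this one, the two should be nearly identical; a quick sketch for comparison:

eti(posteriors$Petal.Length, ci = 0.89) # equal-tailed interval, to compare with the HDI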

Effect significance

However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know whether an effect has significance in practical or statistical terms, or in other words, whether the effect is important. For instance, is the effect different from 0? How can we assess this?

Well, in this particular case, the answer is quite clear: all possible effect values (i.e., the whole posterior distribution) are positive and above 0.35, which is already substantial evidence that the effect is not zero.

But still, we want some objective decision criterion to say whether or not the effect is ‘significant’. One approach, similar to the frequentist framework, would be to check whether the Credible Interval contains 0. If it does not, we would conclude that our effect is ‘significant’.
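This check can be done programmatically, since hdi() returns the interval bounds as a data frame; a minimal sketch, assuming the posteriors object from above:

ci89 <- hdi(posteriors$Petal.Length, ci = 0.89)
ci89$CI_low > 0 | ci89$CI_high < 0 # TRUE if the 89% CI excludes 0
> [1] TRUE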

But this index is not very fine-grained, no? Can we do better? Yes!

A linear model with a categorical predictor

Imagine for a moment you are interested in how the weight of chickens varies depending on two different feed types. For this example, we will start by selecting from the chickwts dataset (available in base R) two feed types of interest for us (we do have peculiar interests): meat meals and sunflowers.

Data preparation and model fitting

library(dplyr)

# We keep only rows for which feed is meatmeal or sunflower
data <- filter(chickwts, feed %in% c("meatmeal", "sunflower"))

Let’s run another Bayesian regression to predict weight from the type of feed.

model <- stan_glm(weight ~ feed, data = data)

Posterior description

posteriors <- insight::get_parameters(model)

ggplot(posteriors, aes(x = feedsunflower)) +
  geom_density(fill = "red")

This represents the posterior distribution of the difference between meatmeal and sunflowers. It seems that the difference is positive (since the values are concentrated on the right side of 0). Eating sunflowers makes you fatter (at least, if you’re a chicken). But, by how much?

Let us compute the median and the CI:

median(posteriors$feedsunflower)
> [1] 53
hdi(posteriors$feedsunflower)
> 95% HDI: [-0.12, 97.72]

It makes you fatter by around 53 grams (the median). However, the uncertainty is quite high: the 95% HDI above suggests that the difference between the two feed types could plausibly lie anywhere between roughly 0 and 98 grams.

Is this effect different from 0?

ROPE Percentage

Testing whether this distribution is different from 0 doesn’t make sense: 0 is a single value, and the probability of a continuous distribution being exactly equal to any single value is zero, so the distribution is always, trivially, ‘different’ from it.

However, one way to assess significance could be to define an area around 0 which we will consider as practically equivalent to zero (i.e., the absence of an effect, or a negligible one). This is called the Region of Practical Equivalence (ROPE), and is one way of testing the significance of parameters.

How can we define this region?

Driing driiiing

The easystats team speaking. How can we help?

I am Prof. Sanders. An expert in chicks… I mean chickens. Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.

Well, that’s convenient. Now we know that we can define the ROPE as the [-20, 20] range. All effects within this range are considered as null (negligible). We can now compute the proportion of the 89% most probable values (the 89% CI) that falls inside this null range.
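Before using the dedicated rope() function below, here is roughly what it computes under the hood (a sketch assuming the posteriors object from above; the exact boundary handling inside rope() may differ slightly):

ci89 <- hdi(posteriors$feedsunflower, ci = 0.89)
in_ci <- posteriors$feedsunflower[posteriors$feedsunflower >= ci89$CI_low &
  posteriors$feedsunflower <= ci89$CI_high]
mean(in_ci >= -20 & in_ci <= 20) * 100 # % of the 89% CI falling inside the ROPE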

rope(posteriors$feedsunflower, range = c(-20, 20), ci = 0.89)
> # Proportion of samples inside the ROPE [-20.00, 20.00]:
> 
> inside ROPE
> -----------
> 5.28 %

5% of the 89% CI can be considered as null. Is that a lot? Based on our guidelines, yes, it is too much. Thus, under this particular definition of the ROPE, we conclude that the effect is not significant (the probability of it being negligible is too high).

That said, to be honest, I have some doubts about this Prof. Sanders. I don’t really trust his definition of ROPE. Is there a more objective way of defining it?

Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).

Yes! One common practice is, for instance, to use one tenth (1/10 = 0.1) of the standard deviation (SD) of the response variable, which can be considered as a “negligible” effect size (Cohen, 1988).

rope_value <- 0.1 * sd(data$weight)
rope_range <- c(-rope_value, rope_value)
rope_range
> [1] -6.2  6.2

Let’s redefine our ROPE as the region within the [-6.2, 6.2] range. Note that this range can be obtained directly with the rope_range function :)

rope_value <- rope_range(model)
rope_value
> [1] -6.2  6.2

Let’s recompute the percentage in ROPE:

rope(posteriors$feedsunflower, range = rope_range, ci = 0.89)
> # Proportion of samples inside the ROPE [-6.17, 6.17]:
> 
> inside ROPE
> -----------
> 0.00 %

With this more reasonable definition of the ROPE, we observe that the 89% CI of the posterior distribution of the effect does not overlap with the ROPE at all. Thus, we can conclude that the effect is significant (in the sense of being important enough to be noted).
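Relatedly, bayestestR’s equivalence_test() function turns this percentage into an explicit decision (whether the null hypothesis of a negligible effect is rejected, accepted, or undecided), based on the overlap between the HDI and the ROPE; a sketch, run directly on the model:

equivalence_test(model, range = rope_range, ci = 0.89)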

Probability of Direction (pd)

Maybe we are not interested in whether the effect is non-negligible. Maybe we just want to know if this effect is positive or negative. In this case, we can simply compute the proportion of the posterior that is positive, no matter the “size” of the effect.

n_positive <- posteriors %>%
  filter(feedsunflower > 0) %>% # select only positive values
  nrow() # Get length

n_positive / nrow(posteriors) * 100
> [1] 98

We can conclude that the effect is positive with a probability of 98%. We call this index the Probability of Direction (pd). It can, in fact, be computed more easily with the following:

p_direction(posteriors$feedsunflower)
> Probability of Direction: 0.98

Interestingly, it so happens that this index is usually highly correlated with the frequentist p-value. We can even roughly infer the corresponding p-value with a simple transformation:

pd <- 97.82
onesided_p <- 1 - pd / 100
twosided_p <- onesided_p * 2
twosided_p
> [1] 0.044
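bayestestR also ships a helper for this conversion, pd_to_p(), which takes the pd expressed as a proportion (not a percentage); a quick sketch:

pd_to_p(0.9782) # two-sided by default
> [1] 0.044
pd_to_p(0.9782, direction = "one-sided")
> [1] 0.022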

If we had run our model in the frequentist framework, we should observe an effect with a p-value of approximately 0.04. Is that true?

Comparison to frequentist

summary(lm(weight ~ feed, data = data))
> 
> Call:
> lm(formula = weight ~ feed, data = data)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -123.91  -25.91   -6.92   32.09  103.09 
> 
> Coefficients:
>               Estimate Std. Error t value Pr(>|t|)    
> (Intercept)      276.9       17.2   16.10  2.7e-13 ***
> feedsunflower     52.0       23.8    2.18     0.04 *  
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 57 on 21 degrees of freedom
> Multiple R-squared:  0.185,   Adjusted R-squared:  0.146 
> F-statistic: 4.77 on 1 and 21 DF,  p-value: 0.0405

The frequentist model tells us that the difference is positive and significant (\(\beta = 52, p = 0.04\)).

Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a deeper and more intuitive understanding of our effect, and of the uncertainty of its estimation.

All with one function

And yet, I agree, it was a bit tedious to extract and compute all the indices. But what if I told you that we can do all of this, and more, with only one function?

Behold, describe_posterior!

This function computes all of the aforementioned indices, and can be run directly on the model:

describe_posterior(model, test = c("p_direction", "rope", "bayesfactor"))
> Summary of Posterior Distribution
> 
> Parameter     | Median |           95% CI |     pd |          ROPE | % in ROPE |  Rhat |     ESS |     BF
> ---------------------------------------------------------------------------------------------------------
> (Intercept)   | 276.62 | [240.03, 310.68] |   100% | [-6.17, 6.17] |        0% | 1.002 | 2793.00 | > 1000
> feedsunflower |  52.73 | [ -0.12,  97.72] | 98.38% | [-6.17, 6.17] |     1.89% | 1.000 | 2860.00 |  0.806

Tada! There we have it! The median, the CI, the pd and the ROPE percentage!
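The function is also customizable. For instance, here is a sketch of a few of its arguments, requesting the median with an 89% HDI and only the pd and ROPE tests:

describe_posterior(
  model,
  centrality = "median",
  ci = 0.89,
  ci_method = "hdi",
  test = c("p_direction", "rope"),
  rope_ci = 0.89
)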

Understanding and describing posterior distributions is just one aspect of Bayesian modelling. Are you ready for more?! Click here to see the next example.

References

Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Lawrence Erlbaum Associates.
Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan (2nd ed.). Academic Press.
McElreath, R. (2018). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman & Hall/CRC.
bayestestR/inst/doc/probability_of_direction.Rmd0000644000175000017500000002674014023526535022001 0ustar nileshnilesh--- title: "Probability of Direction (pd)" output: rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Probability of Direction (pd)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("KernSmooth", quietly = TRUE) || !requireNamespace("GGally", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = "") knitr::opts_chunk$set(comment = ">") options(digits = 2) set.seed(333) ``` # What is the *pd?* The **Probability of Direction (pd)** is an index of **effect existence**, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (*i.e.*, is positive or negative). Beyond its **simplicity of interpretation, understanding and computation**, this index also presents other interesting properties: - It is **independent from the model**: It is solely based on the posterior distributions and does not require any additional information from the data or the model. - It is **robust** to the scale of both the response variable and the predictors. - It is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. However, this index is not relevant to assess the magnitude and importance of an effect (the meaning of "significance"), which is better achieved through other indices such as the [ROPE percentage](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). In fact, indices of significance and existence are totally independent. You can have an effect with a *pd* of **99.99\%**, for which the whole posterior distribution is concentrated within the `[0.0001, 0.0002]` range. In this case, the effect is **positive with a high certainty**, but also **not significant** (*i.e.*, very small). Indices of effect existence, such as the *pd*, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect's direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance. 
# Relationship with the *p*-value In most cases, it seems that the *pd* has a direct correspondence with the frequentist **one-sided *p*-value** through the formula: $$p_{one-sided} = 1-p_d$$ Similarly, the **two-sided *p*-value** (the most commonly reported one) is equivalent through the formula: $$p_{two-sided} = 2*(1-p_d)$$ Thus, the two-sided *p*-value of respectively **.1**, **.05**, **.01** and **.001** would correspond approximately to a *pd* of **95\%**, **97.5\%**, **99.5\%** and **99.95\%** . ```{r message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'} library(dplyr) library(tidyr) library(ggplot2) library(see) read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>% mutate( effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"), p_direction = p_direction * 100 ) %>% ggplot(aes(x = p_direction, y = p_value, color = effect_existence)) + geom_point2(alpha = 0.1) + geom_segment(aes(x = 95, y = Inf, xend = 95, yend = 0.1), color = "black", linetype = "longdash") + geom_segment(aes(x = -Inf, y = 0.1, xend = 95, yend = 0.1), color = "black", linetype = "longdash") + geom_segment(aes(x = 97.5, y = Inf, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") + geom_segment(aes(x = -Inf, y = 0.05, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") + theme_modern() + scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits = 2))) + scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) + scale_color_manual(values = c("Presence of true effect" = "green", "Absence of true effect" = "red")) + theme(legend.title = element_blank()) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab("Probability of Direction (pd)") + ylab("Frequentist p-value") ``` > **But if it's like the *p*-value, it must be bad because the *p*-value is bad [*insert reference to the reproducibility crisis*].** In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the *p*-value is an intrinsically bad or wrong. Instead, it is its **misuse**, **misunderstanding** and **misinterpretation** that fuels the decay of the situation. For instance, the fact that the **pd** is highly correlated with the *p*-value suggests that the latter is more an index of effect *existence* than *significance* (*i.e.*, "worth of interest"). The Bayesian version, the **pd**, has an intuitive meaning and makes obvious the fact that **all thresholds are arbitrary**. Additionally, the **mathematical and interpretative transparency** of the **pd**, and its reconceptualisation as an index of effect existence, offers a valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity with the frequentist *p*-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework. # Methods of computation The most **simple and direct** way to compute the **pd** is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws. 
The second approach relies on [**density estimation**](https://easystats.github.io/bayestestR/reference/estimate_density.html). It starts by estimating the density function (for which many methods are available), and then computing the [**area under the curve**](https://easystats.github.io/bayestestR/reference/area_under_curve.html) (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered as more precise, but strongly depends on the method used to estimate the density function. # Methods comparison Let's compare the 4 available methods, the **direct** method and 3 **density-based** methods differing by their density estimation algorithm (see [`estimate_density`](https://easystats.github.io/bayestestR/reference/estimate_density.html)). ## Correlation Let's start by testing the proximity and similarity of the results obtained by different methods. ```{r message=FALSE, warning=FALSE, fig.align='center'} library(bayestestR) library(logspline) library(KernSmooth) # Compute the correlations data <- data.frame() for (the_mean in runif(25, 0, 4)) { for (the_sd in runif(25, 0.5, 4)) { x <- rnorm(100, the_mean, abs(the_sd)) data <- rbind( data, data.frame( "direct" = pd(x), "kernel" = pd(x, method = "kernel"), "logspline" = pd(x, method = "logspline"), "KernSmooth" = pd(x, method = "KernSmooth") ) ) } } data <- as.data.frame(sapply(data, as.numeric)) # Visualize the correlations library(ggplot2) library(GGally) GGally::ggpairs(data) + theme_classic() ``` All methods give are highly correlated and give very similar results. That means that the method choice is not a drastic game changer and cannot be used to tweak the results too much. ## Accuracy To test the accuracy of each methods, we will start by computing the **direct *pd*** from a very dense distribution (with a large amount of observations). This will be our baseline, or "true" *pd*. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the *pd* with different methods. The closer this estimate is from the reference one, the better. ```{r message=FALSE, warning=FALSE} data <- data.frame() for (i in 1:25) { the_mean <- runif(1, 0, 4) the_sd <- abs(runif(1, 0.5, 4)) parent_distribution <- rnorm(100000, the_mean, the_sd) true_pd <- pd(parent_distribution) for (j in 1:25) { sample_size <- round(runif(1, 25, 5000)) subsample <- sample(parent_distribution, sample_size) data <- rbind( data, data.frame( "sample_size" = sample_size, "true" = true_pd, "direct" = pd(subsample) - true_pd, "kernel" = pd(subsample, method = "kernel") - true_pd, "logspline" = pd(subsample, method = "logspline") - true_pd, "KernSmooth" = pd(subsample, method = "KernSmooth") - true_pd ) ) } } data <- as.data.frame(sapply(data, as.numeric)) ``` ```{r message=FALSE, warning=FALSE, fig.align='center'} library(tidyr) library(dplyr) data %>% tidyr::gather(Method, Distance, -sample_size, -true) %>% ggplot(aes(x = sample_size, y = Distance, color = Method, fill = Method)) + geom_point(alpha = 0.3, stroke = 0, shape = 16) + geom_smooth(alpha = 0.2) + geom_hline(yintercept = 0) + theme_classic() + xlab("\nDistribution Size") ``` The "Kernel" based density methods seems to consistently underestimate the *pd*. Interestingly, the "direct" method appears as being the more reliable, even in the case of small number of posterior draws. ## Can the pd be 100\%? 
`p = 0.000` is coined as one of the term to avoid when reporting results [@lilienfeld2015fifty], even if often displayed by statistical software. The rationale is that for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the `p = 0.000` returned by software is due to approximations related, among other, to finite memory hardware. One could apply this rationale for the *pd*: since all data points have a non-null probability density, then the *pd* (a particular portion of the probability density) can *never* be 100\%. While this is an entirely valid point, people using the *direct* method might argue that their *pd* is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which `pd = 100%` is a valid statement. bayestestR/inst/doc/bayes_factors.Rmd0000644000175000017500000012634414133142440017551 0ustar nileshnilesh--- title: "Bayes Factors" output: rmarkdown::html_vignette: toc: true toc_depth: 2 fig_width: 10.08 fig_height: 6 tags: [r, bayesian, bayes factors] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Bayes Factors} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the following: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Retrieved from [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r setup, include=FALSE} library(knitr) options(knitr.kable.NA = "", digits = 2) knitr::opts_chunk$set( echo = TRUE, comment = ">", out.width = "100%", message = FALSE, warning = FALSE, dpi = 150 ) pkgs <- c( "rstanarm", "BayesFactor", "emmeans", "logspline", "lme4", "ggplot2", "see", "insight", "emmeans", "knitr", "effectsize", "bayestestR" ) if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) { knitr::opts_chunk$set(eval = FALSE) } set.seed(4) if (require("ggplot2") && require("see")) { theme_set(theme_modern()) } ``` The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about the **Bayes factor**. In short, one school of thought (e.g., the *Amsterdam school*, led by [E. J. Wagenmakers](https://www.bayesianspectacles.org/)) advocate its use, and emphasize its qualities as a statistical index, while another point to its limits and prefer, instead, the precise description of posterior distributions (using [CIs](https://easystats.github.io/bayestestR/reference/hdi.html), [ROPEs](https://easystats.github.io/bayestestR/reference/rope.html), etc.). The `bayestestR` package does **not** take a side in this debate, and offers tools to carry out analysis irrespective of the school you subscribe to. 
Instead, it strongly supports the notion of an *informed choice*: **discover the methods, learn about them, understand them, try them, and decide for yourself**. Having said that, here's an introduction to Bayes factors :) # The Bayes Factor **Bayes Factors (BFs) are indices of *relative* evidence of one "model" over another**. In their role as a hypothesis testing index, they are to Bayesian framework what a $p$-value is to the **classical/frequentist framework**. In significance-based testing, $p$-values are used to assess how unlikely are the observed data if the **null hypothesis** were true, while in the **Bayesian model selection framework**, Bayes factors assess evidence for different models, each model corresponding to a specific hypothesis. According to Bayes' theorem, we can update prior probabilities of some model $M$ ($P(M)$) to posterior probabilities ($P(M|D)$) after observing some datum $D$ by accounting for the probability of observing that datum given the model ($P(D|M)$, also known as the *likelihood*): $$ P(M|D) = \frac{P(D|M)\times P(M)}{P(D)} $$ Using this equation, we can compare the probability-odds of two models: $$ \underbrace{\frac{P(M_1|D)}{P(M_2|D)}}_{\text{Posterior Odds}} = \underbrace{\frac{P(D|M_1)}{P(D|M_2)}}_{\text{Likelihood Ratio}} \times \underbrace{\frac{P(M_1)}{P(M_2)}}_{\text{Prior Odds}} $$ Where the *likelihood ratio* (the middle term) is the *Bayes factor* - it is the ***factor*** by which some **prior odds** have been updated after observing the data to **posterior odds**. Thus, Bayes factors can be calculated in two ways: - As a ratio quantifying **the relative probability of the observed data under each of the two models**. (In some contexts, these probabilities are also called *marginal likelihoods*.) $$ BF_{12}=\frac{P(D|M_1)}{P(D|M_2)} $$ - As **the degree of shift in prior beliefs** about the relative credibility of two models (since they can be computed by dividing posterior odds by prior odds). $$ BF_{12}=\frac{Posterior~Odds_{12}}{Prior~Odds_{12}} $$ Here we provide functions for computing Bayes factors in two different contexts: - **testing single parameters (coefficients) within a model** - **comparing statistical models themselves** # Testing Models' Parameters with Bayes Factors {#bayesfactor_parameters} A **Bayes factor for a single parameter** can be used to answer the question: > "Given the observed data, has the null hypothesis of an absence of an effect become more or less credible?" ```{r deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/bayestestR/raw/master/man/figures/deathsticks.jpg") ``` Let's use the Students' (1908) Sleep data set (`data("sleep")`). The data comes from a study in which participants were administered a drug and the researchers assessed the extra hours of sleep that participants slept afterwards. We will try answering the following research question using Bayes factors: > **Given the observed data, has the hypothesis that the drug (the effect of `group`) has no effect on the numbers of hours of extra sleep (variable `extra`) become more of less credible?** ```{r sleep_boxplot, echo=FALSE} library(ggplot2) ggplot(sleep, aes(x = group, y = extra, fill = group)) + geom_boxplot() + theme_classic() + theme(legend.position = "none") ``` The **boxplot** suggests that the second group has a higher number of hours of extra sleep. 
*By how much?* Let's fit a simple [Bayesian linear model](https://easystats.github.io/bayestestR/articles/example1.html), with a prior of $b_{group} \sim N(0, 3)$ (i.e. the prior follows a Gaussian/normal distribution with $mean = 0$ and $SD = 3$), using `rstanarm` package: ```{r rstanarm_model, eval = FALSE} set.seed(123) library(rstanarm) model <- stan_glm( formula = extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE) ) ``` ```{r, echo=FALSE} model <- stan_glm( formula = extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE), refresh = 0 ) ``` ### Testing against a null-*region* One way of operationlizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be *practically* equivalent to the null [@kruschke2010believe]. In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. We can then compute the prior probability of the drug's effect falling *within this null-region*, and the prior probability of the drug's effect falling *outside the null-region* to get our *prior odds*. Say any effect smaller than an hour of extra sleep is practically equivalent to being no effect at all, we would define our prior odds as: $$ \frac {P(b_{drug} \in [-1, 1])} {P(b_{drug} \notin [-1, 1])} $$ Given our prior has a normal distribution centered at 0 hours with a scale (an SD) of 3 hours, our priors would look like this: ```{r, echo=FALSE} null <- c(-1, 1) xrange <- c(-10, 10) x_vals <- seq(xrange[1], xrange[2], length.out = 400) d_vals <- dnorm(x_vals, sd = 3) in_null <- null[1] < x_vals & x_vals < null[2] range_groups <- rep(0, length(x_vals)) range_groups[!in_null & x_vals < 0] <- -1 range_groups[!in_null & x_vals > 0] <- 1 ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + coord_cartesian(ylim = c(0, 0.45)) + theme_modern() + theme(legend.position = c(0.2, 0.8)) pnull <- diff(pnorm(null, sd = 2.5)) prior_odds <- (1 - pnull) / pnull ``` and the prior odds would be 2.2. By looking at the posterior distribution, can now compute the posterior probability of the drug's effect falling *within the null-region*, and the posterior probability of the drug's effect falling *outside the null-region* to get our *posterior odds*: $$ \frac {P(b_{drug} \in [-1,1] | Data)} {P(b_{drug} \notin [-1,1] | Data)} $$ ```{r rstanarm_fit, echo=FALSE} library(bayestestR) model_prior <- unupdate(model) posterior <- insight::get_parameters(model)$group2 prior <- insight::get_parameters(model_prior)$group2 f_post <- logspline::logspline(posterior) d_vals_post <- logspline::dlogspline(x_vals, f_post) ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + coord_cartesian(ylim = c(0, 0.45)) + theme_modern() + theme(legend.position = c(0.2, 0.8)) My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1), prior = model_prior) BF <- My_first_BF$BF[2] post_odds <- prior_odds * BF med_post <- point_estimate(posterior)$Median ``` We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are 2, which seems to favor **the effect being non-null**. 
**But**, does this mean the data support the alternative over the null? Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here! Let's compute the Bayes factor as the change from the prior odds to the posterior odds: $BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9$! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than it does for the drug having some clinically significant effect. Thus, although the center of distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has *overall* shifted closer to the null interval, making the values in the null interval more probable! [see *Non-overlapping Hypotheses* in @morey2011bayesinterval] All of this can be achieved with the function `bayesfactor_parameters()`, which computes a Bayes factor for each of the model's parameters: ```{r, eval=FALSE} My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1)) My_first_BF ``` ```{r, echo=FALSE} print(My_first_BF) ``` We can also plot using the `see` package: ```{r} library(see) plot(My_first_BF) ``` Note that **interpretation guides** for Bayes factors can be found in the `effectsize` package: ```{r} effectsize::interpret_bf(exp(My_first_BF$log_BF[2]), include_value = TRUE) ``` ### Testing against the *point*-null (0) > **What if we don't know what region would be practically equivalent to 0?** Or if we just want the null to be exactly zero? Not a problem - as the width of null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.^[Note that as the width of null interval shrinks to zero, the prior probability and posterior probability of the alternative tends towards 1.00.] This ratio is called the **Savage-Dickey ratio**, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against the a model in which the parameter of interest has been restricted to a point-null: > "[...] the Bayes factor for $H_0$ versus $H_1$ could be obtained by analytically integrating out the model parameter $\theta$. However, the Bayes factor may likewise be obtained by only considering $H_1$, and dividing the height of the posterior for $\theta$ by the height of the prior for $\theta$, at the point of interest." [@wagenmakers2010bayesian] ```{r, eval=FALSE} My_second_BF <- bayesfactor_parameters(model, null = 0) My_second_BF ``` ```{r, echo=FALSE} My_second_BF <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0 ) print(My_second_BF) ``` ```{r} plot(My_second_BF) ``` ### Directional hypotheses We can also compute Bayes factors for directional hypotheses ("one sided"), if we have a prior hypotheses about the direction of the effect. This can be done by setting an *order restriction* on the prior distribution (which results in an order restriction on the posterior distribution) of the alternative [@morey2014simple]. 
For example, if we have a prior hypothesis that *the drug has a positive effect on the number of sleep hours*, the alternative will be restricted to the region to the right of the null (point or interval): ```{r savagedickey_one_sided, eval=FALSE} test_group2_right <- bayesfactor_parameters(model, direction = ">") test_group2_right ``` ```{r prior_n_post_plot_one_sided, echo=FALSE} test_group2_right <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0, direction = ">" ) BF <- test_group2_right$BF print(test_group2_right) ``` ```{r} plot(test_group2_right) ``` As we can see, given that we have an *a priori* assumption about the direction of the effect (that the effect is positive), **the presence of an effect is 2.8 times more likely than the absence of an effect**, given the observed data (or that the data are 2.8 time more probable under $H_1$ than $H_0$). This indicates that, given the observed data, and a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of 2.8 is still considered quite [weak evidence](https://easystats.github.io/effectsize/reference/interpret_bf.html)). Thanks to the flexibility of Bayesian framework, it is also possible to compute a Bayes factor for **dividing** hypotheses - that is, for a null and alternative that are *complementary*, opposing one-sided hypotheses [@morey2014simple]. For example, above we compared an alternative of $H_A$: *the drug has a positive effects* to the null $H_0$: *the drug has no effect*. But we can also compare instead the same alternative to its *complementary* hypothesis: $H_{-A}$: *the drug has a negative effects*. ```{r inteval_div, eval=FALSE} test_group2_dividing <- bayesfactor_parameters(model, null = c(-Inf, 0)) test_group2_dividing ``` ```{r inteval_div2, echo=FALSE} test_group2_dividing <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = c(-Inf, 0) ) print(test_group2_dividing) ``` ```{r} plot(test_group2_dividing) ``` We can see that this test produces even stronger (more conclusive) evidence than the one-sided vs. point-null test! And indeed, as a rule of thumb, the more specific the two hypotheses are, and the more distinct they are from one another, the more *power* our Bayes factor has! ^[For more, see [this talk by Richard D. Morey, minute 48](https://philstatwars.files.wordpress.com/2020/09/richard_presentation.mp4)] Thanks to the transitivity of Bayes factors, we can also use `bayesfactor_parameters()` to compare even more types of hypotheses, with some trickery. For example: $$ \underbrace{BF_{0 **Upon observing the data, the credibility of which of the parameter’s values has increased (or decreased)?** For example, we've seen that the point null has become somewhat less credible after observing the data, but we might also ask which values have **gained** credibility given the observed data?. The resulting range of values is called **the support interval** as it indicates which values are supported by the data [@wagenmakers2018SI]. We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities. 
In `bayestestR`, this can be achieved with the `si()` function: ```{r} my_first_si <- si( posterior = data.frame(group2 = posterior), prior = data.frame(group2 = prior), BF = 1 ) print(my_first_si) ``` The argument `BF = 1` indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all). Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased): ```{r} plot(my_first_si) ``` We can also see the this support interval (just barely) excludes the point null (0) - whose credibility we've already seen has decreased by the observed data. This emphasizes the relationship between the support interval and the Bayes factor: > "The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level $\alpha$. For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against." [@wagenmakers2018SI] Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent: - A $BF = 1$ contains values whose credibility has merely not decreased by observing the data. - A $BF > 1$ contains values who received more impressive support from the data. - A $BF < 1$ contains values whose credibility has *not* been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than $1/BF$ in support of the alternative. # Comparing Models using Bayes Factors {#bayesfactor_models} Bayes factors can also be used to compare statistical **models**. In this statistical context, they answer the following question: > **Under which model are the observed data more probable?** In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the **relative** evidence for one model over the other. Let's use Bayes factors for model comparison to find a model that best describes the length of an iris' sepal using the `iris` data set. ### For Bayesian models (`brms` and `rstanarm`) **Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:** - `brmsfit` models **must** have been fitted with `save_pars = save_pars(all = TRUE)` - `stanreg` models **must** have been fitted with a defined `diagnostic_file`. 
Let's first fit 5 Bayesian regressions with `brms` to predict `Sepal.Length`: ```{r brms_disp, eval = FALSE} library(brms) # intercept only model m0 <- brm(Sepal.Length ~ 1, data = iris, prior = set_prior("student_t(3, 6, 6)", class = "Intercept") + set_prior("student_t(3, 0, 6)", class = "sigma"), save_pars = save_pars(all = TRUE), backend = "rstan") # Petal.Length only m1 <- brm(Sepal.Length ~ Petal.Length, data = iris, prior = set_prior("student_t(3, 6, 6)", class = "Intercept") + set_prior("student_t(3, 0, 6)", class = "sigma") + set_prior("normal(0, 1)", coef = "Petal.Length"), save_pars = save_pars(all = TRUE)) # Species only m2 <- brm(Sepal.Length ~ Species, data = iris, prior = set_prior("student_t(3, 6, 6)", class = "Intercept") + set_prior("student_t(3, 0, 6)", class = "sigma") + set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")), save_pars = save_pars(all = TRUE)) # Species + Petal.Length model m3 <- brm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = set_prior("student_t(3, 6, 6)", class = "Intercept") + set_prior("student_t(3, 0, 6)", class = "sigma") + set_prior("normal(0, 1)", coef = "Petal.Length") + set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")), save_pars = save_pars(all = TRUE)) # full interactive model m4 <- brm(Sepal.Length ~ Species * Petal.Length, data = iris, prior = set_prior("student_t(3, 6, 6)", class = "Intercept") + set_prior("student_t(3, 0, 6)", class = "sigma") + set_prior("normal(0, 1)", coef = "Petal.Length") + set_prior("normal(0, 3)", coef = c("Speciesversicolor", "Speciesvirginica")) + set_prior("normal(0, 2)", coef = c("Speciesversicolor:Petal.Length", "Speciesvirginica:Petal.Length")), save_pars = save_pars(all = TRUE)) ``` We can now compare these models with the `bayesfactor_models()` function, using the `denominator` argument to specify the model against which the rest of the models will be compared (in this case, the intercept-only model): ```{r brms_models_disp, eval = FALSE} library(bayestestR) comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0) comparison ``` ```{r, echo = FALSE} comparison <- structure( list(Model = c("Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1"), log_BF = c(101.556419030653, 64.2903334815192, 122.864721399001, 119.712908243647, 0)), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c("m1", "m2", "m3", "m4", "m0"), denominator = 5L, BF_method = "marginal likelihoods (bridgesampling)", unsupported_models = FALSE ) print(comparison) ``` We can see that the `Species + Petal.Length` model is the best model - with $BF=2\times 10^{53}$ compared to the null (intercept only). Due to the transitive property of Bayes factors, we can easily change the reference model to the full `Species * Petal.Length` model: ```{r update_models1} update(comparison, reference = 4) ``` As we can see, the `Species + Petal.Length` model is also favored compared to the `Species * Petal.Length` model, though to several orders of magnitude less - is is only supported 23.38 times more!) We can also change the reference model to the `Species` model: ```{r update_models2} update(comparison, reference = 2) ``` Notice that, in the Bayesian framework the compared models *do not* need to be nested models, as happened here when we compared the `Petal.Length`-only model to the `Species`-only model (something that cannot be done in the frequentist framework, where compared models must be nested in one another). 
We can also get a matrix of Bayes factors of all the pairwise model comparisons: ```{r} as.matrix(comparison) ``` **NOTE:** In order to correctly and precisely estimate Bayes Factors, you always need the 4 P's: **P**roper **P**riors ^[[Robert, 2016](https://doi.org/10.1016/j.jmp.2015.08.002); [Kass & Raftery, 1993](https://doi.org/10.1080/01621459.1995.10476572); [Fernández, Ley, & Steel, 2001](https://doi.org/10.1016/S0304-4076(00)00076-2)], and a **P**lentiful **P**osterior ^[[Gronau, Singmann, & Wagenmakers, 2017](https://arxiv.org/abs/1710.08162)]. ### For Frequentist models via the BIC approximation It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models [@wagenmakers2007practical]. Let's try it out on some **linear mixed-effects models**: ```{r lme4_models} library(lme4) # define models with increasing complexity m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris) m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) # model comparison bayesfactor_models(m1, m2, m3, m4, denominator = m0) ``` ### Order restricted models {#bayesfactor_restricted} As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. For example, consider the following model, in which we predict the length of an iris' sepal from the length of its petal, as well as from its species, with priors: - $b_{petal} \sim N(0,2)$ - $b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)$ ```{r} iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE), refresh = 0 ) ``` These priors are **unrestricted** - that is, **all values** between $-\infty$ and $\infty$ of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Subsequently, *a priori* the ordering of the parameters relating to the iris species can have any ordering, such that *a priori* setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa! Does it make sense to let our priors cover all of these possibilities? That depends on our *prior* knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be *negatively* associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica. These priors can be formulated as **restricted** priors [@morey_2015_blog; @morey2011bayesinterval]: 1. The novice botanist: $b_{petal} > 0$ 2. The expert botanist: $b_{versicolors} > 0\ \&\ b_{virginica} > 0$ By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. This can be achieved with `bayesfactor_restricted()`, that compute a Bayes factor for these restricted model vs the unrestricted model. 
Let's first specify these restrictions as logical conditions: ```{r} botanist_hypotheses <- c( "Petal.Length > 0", "(Speciesversicolor > 0) & (Speciesvirginica > 0)" ) ``` Let's test these hypotheses: ```{r} model_prior <- unupdate(iris_model) botanist_BFs <- bayesfactor_restricted( posterior = iris_model, prior = model_prior, hypothesis = botanist_hypotheses ) print(botanist_BFs) ``` We can see that the novice botanist's hypothesis gets a Bayes factor of ~2, indicating the data provides twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length than for a model with not such restriction. What about our expert botanist? He seems to have failed miserably, with a BF favoring the *unrestricted* model many many times over. How is this possible? It seems that when *controlling for petal length*, versicolor and virginica actually have shorter sepals! ```{r plot_iris, echo=FALSE} ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) + geom_point() + scale_color_flat() + theme(legend.position = c(0.2, 0.8)) ``` Note that these Bayes factors compare the restricted model to the unrestricted model. If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so: $$ BF_{\text{restricted vs. NULL}} = \frac {BF_{\text{restricted vs. un-restricted}}} {BF_{\text{un-restricted vs NULL}}} $$ **Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (*a priori*) hypotheses, and should not be used for any post hoc comparisons [@morey_2015_blog].** **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* appendix below. # Bayesian Model Averaging In the previous section, we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider, or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases, we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models. ### Inclusion Bayes factors {#bayesfactor_inclusion} Inclusion Bayes factors answer the question: > **Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?** In other words, on average, are models with predictor $X$ more likely to have produced the observed data than models without predictor $X$?^[A model without predictor $X$ can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.] Since each model has a prior probability, it is possible to sum the prior probability of all models that include a predictor of interest (the *prior inclusion probability*), and of all models that do not include that predictor (the *prior exclusion probability*). After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models' probabilities to obtain the *posterior inclusion probability* and the *posterior exclusion probability*. Once again, the change from prior inclusion odds to the posterior inclusion odds is the **Inclusion Bayes factor** ["$BF_{Inclusion}$"; @clyde2011bayesian]. 
Lets use the `brms` example from above: ```{r inclusion_brms} bayesfactor_inclusion(comparison) ``` If we examine the interaction term's inclusion Bayes factor, we can see that across all 5 models, a model with the term is *on average* (1/0.171) 5.84 times less supported than a model without the term. Note that `Species`, a factor represented in the model with several parameters, gets a *single* Bayes factor - inclusion Bayes factors are given **per predictor**! We can also compare only matched models - such that averaging is done only across models that (1) do not include any interactions with the predictor of interest; (2) for interaction predictors, averaging is done only across models that contain the main effects from which the interaction predictor is comprised (see explanation for why you might want to do this [here](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp)). ```{r inclusion_brms2} bayesfactor_inclusion(comparison, match_models = TRUE) ``` ### Comparison with JASP `bayesfactor_inclusion()` is meant to provide Bayes Factors per predictor, similar to JASP's *Effects* option. Let's compare the two: 1. **Across all models**: ```{r JASP_all} library(BayesFactor) data(ToothGrowth) ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- anovaBF(len ~ dose * supp, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF_ToothGrowth) ``` ```{r JASP_all_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.jpg") ``` 2. **Across matched models**: ```{r JASP_matched} bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE) ``` ```{r JASP_matched_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.jpg") ``` 3. **With Nuisance Effects**: We'll add `dose` to the null model in JASP, and do the same in `R`: ```{r JASP_Nuisance} BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4] / BF_ToothGrowth[2] # OR: # update(bayesfactor_models(BF_ToothGrowth), # subset = c(4, 5), # reference = 3) BF_ToothGrowth_against_dose bayesfactor_inclusion(BF_ToothGrowth_against_dose) ``` ```{r JASP_Nuisance_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.jpg") ``` ## Averaging posteriors {#weighted_posteriors} Similar to how we can average evidence for a predictor across models, we can also average the **posterior estimate** across models. This is useful in situations where Bayes factors seem to support a null effect, yet the *HDI* for the alternative excludes the null value (also see `si()` described above). For example, looking at Motor *Trend Car Road Tests* (`data(mtcars)`), we would naturally predict miles/gallon (`mpg`) from transition type (`am`) and weight (`wt`), but what about number of carburetors (`carb`)? Is this a good predictor? We can determine this by comparing the following models: ```{r} mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10, 10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0 ) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10, 10, 20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0 ) BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE) BF_carb ``` It seems that the model without `carb` as a predictor is $1/BF=1.2$ times more likely than the model *with* `carb` as a predictor. 
We might then assume that in the latter model, the `HDI` will include the point-null value of 0 effect, to also indicate the credibility of the null in the posterior. However, this is not the case: ```{r} hdi(mod_carb, ci = .95) ``` How can this be? By estimating the HDI of the effect for `carb` in the full model, we are acting under the assumption that this model is correct. However, as we've just seen, both models are practically tied. If this is the case, **why limit our estimation of the effect just to one model?** [@van2019cautionary]. Using Bayesian Model Averaging, we can combine the posterior samples from several models, weighted by the models' marginal likelihood (done via the `bayesfactor_models()` function). If some parameter is part of some of the models but is missing from others, it is assumed to be fixed at 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI. In `bayestestR`, we can do this with the `weighted_posteriors()` function: ```{r} BMA_draws <- weighted_posteriors(mod, mod_carb) BMA_hdi <- hdi(BMA_draws, ci = .95) BMA_hdi plot(BMA_hdi) ``` We can see that across both models under consideration, the posterior of the `carb` effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense as both models were almost equally supported by the data. We can also see that, across both models, **the HDI now does contain 0**. Thus we have resolved the conflict between the Bayes factor and the HDI [@rouder2018bayesian]! **Note**: Parameters might play different roles across different models. For example, the parameter `A` plays a different role in the model `Y ~ A + B` (where it is a *main* effect) than it does in the model `Y ~ A + B + A:B` (where it is a *simple* effect). Centering predictors (mean-subtracting continuous variables, and using effects coding via `contr.sum` or orthonormal coding via [`contr.orthonorm`](https://easystats.github.io/bayestestR/reference/contr.orthonorm.html) for factors) can in some cases reduce this issue. # Appendices ## Testing contrasts (with `emmeans` / `modelbased`) Besides testing model parameters, `bayesfactor_parameters()` can be used to test any estimate, based on the prior and posterior distributions of that estimate. One way to achieve this is with a mix of `bayesfactor_parameters()` + [**`emmeans`**](https://cran.r-project.org/package=emmeans) to [test Bayesian contrasts](https://easystats.github.io/blog/posts/bayestestr_emmeans/). For example, in the `sleep` example from above, we can estimate the group means and the difference between them: ```{r, echo=FALSE} set.seed(1) ``` ```{r} library(emmeans) (group_diff <- emmeans(model, pairwise ~ group)) # pass the original model via prior bayesfactor_parameters(group_diff, prior = model) ``` That is strong evidence for the mean of group 1 being 0, and for the mean of group 2 not being 0, but hardly any evidence for the difference between them not being 0. Conflict? Uncertainty? That is the Bayesian way!
We can also use the `easystats` [**`modelbased`**](https://cran.r-project.org/package=modelbased) package to compute Bayes factors for contrasts: ```{r, echo=FALSE} set.seed(1) ``` ```{r, eval=FALSE} library(modelbased) estimate_contrasts(model, test = "bf", bf_prior = model) ``` **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* section below. ## Specifying correct priors for factors {#contr_bayes} This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.orthonorm`) [as outlined in @rouder2012default, section 7.2]. **Special care should be taken when working with factors with 3 or more levels**. ### Contrasts (and marginal means) The *effects* factor coding commonly used in factorial analysis carries a hidden bias when it is applied to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all *a priori* differences to have the same distribution, but... For our example, we will test all ***prior*** pairwise differences between the 3 species in the `iris` dataset. ```{r} df <- iris contrasts(df$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0 ) (pairs_sum <- pairs(emmeans(fit_sum, ~Species))) ggplot(stack(insight::get_parameters(pairs_sum)), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` Notice that, though the prior estimate for all 3 pairwise contrasts is ~0, the scale (the width of the HDI) is much narrower for the prior of the `setosa - versicolor` contrast! **What happened???** This is caused by an inherent bias in the priors introduced by the *effects* coding (it's even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect's parameters). **And since it affects the priors, this bias will also bias the Bayes factors, over- or under-stating evidence for some contrasts relative to others!** The solution is to use *orthonormal* factor coding, à la `contr.orthonorm`. You can either specify this factor coding per factor: ```{r} contrasts(df$Species) <- contr.orthonorm ``` Or you can set it globally: ```{r, eval=FALSE} options(contrasts = c("contr.orthonorm", "contr.poly")) ``` Let's again estimate the ***prior*** differences: ```{r} fit_bayes <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0 ) (pairs_bayes <- pairs(emmeans(fit_bayes, ~Species))) ggplot(stack(insight::get_parameters(pairs_bayes)), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` We can see that using this coding scheme, we have equal priors on all pairwise contrasts. There are other solutions to this problem of priors. You can read about them in [Solomon Kurz's blog post](https://solomonkurz.netlify.app/post/2020-12-09-multilevel-models-and-the-index-variable-approach/). ### Order restrictions This bias also affects order restrictions involving 3 or more levels.
For example, if we want to test an order restriction among A, B, and C, the *a priori* probability of obtaining the order A > C > B is 1/6 (reach back to *intro to stats* year 1), but... For our example, we will be interested in the following order restrictions in the `iris` dataset (each line is a separate restriction): ```{r} hyp <- c( # comparing 2 levels "setosa < versicolor", "setosa < virginica", "versicolor < virginica", # comparing 3 (or more) levels "setosa < virginica & virginica < versicolor", "virginica < setosa & setosa < versicolor", "setosa < versicolor & versicolor < virginica" ) ``` With the default factor coding, this looks like this: ```{r, eval=FALSE} contrasts(df$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian() ) em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(df$Species)[, ] <- contr.sum(3) fit_sum <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0 ) em_sum <- emmeans(fit_sum, ~Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ***What happened???*** 1. The comparisons of 2 levels all have a prior of ~0.5, as expected. 2. The comparisons of 3 levels have different priors, depending on the order restriction - i.e. **some orders are *a priori* more likely than others!!!** Again, this is solved by using the *orthonormal* factor coding (from above). ```{r, eval=FALSE} contrasts(df$Species) <- contr.orthonorm fit_bayes <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian() ) em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(df$Species)[, ] <- contr.orthonorm(3) fit_bayes <- stan_glm(Sepal.Length ~ Species, data = df, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0 ) em_bayes <- emmeans(fit_bayes, ~Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` ### Conclusion When comparing the results from the two factor coding schemes, we find: 1. In both cases, the estimated (posterior) means are quite similar (if not identical). 2. The priors and Bayes factors differ between the two schemes. 3. Only with `contr.orthonorm` is the prior distribution of the differences, or of the order of 3 (or more) means, balanced (the short illustration below shows where this balance comes from).
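As an illustrative aside (separate from the analyses above), we can inspect the contrast matrices of the two coding schemes directly:

```{r, eval=FALSE}
contr.sum(3)       # effects coding: columns are neither orthogonal nor of unit length
contr.orthonorm(3) # orthonormal coding

# The defining property of the orthonormal scheme:
crossprod(contr.orthonorm(3)) # (near-)identity matrix: columns are orthonormal
```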
# References bayestestR/inst/doc/guidelines.R0000644000175000017500000000065214135670667016550 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ## ----echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg") bayestestR/inst/doc/mediation.Rmd0000644000175000017500000001401014054321374016673 0ustar nileshnilesh--- title: "Mediation Analysis using Bayesian Regression Models" output: rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, mediation] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Mediation Analysis using Bayesian Regression Models} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r, SETTINGS-knitr, echo = FALSE, warning = FALSE, message = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", dev = "png", fig.width = 7, fig.height = 5, message = FALSE, warning = FALSE ) options(width = 800) if (!requireNamespace("mediation", quietly = TRUE) || !requireNamespace("httr", quietly = TRUE) || !requireNamespace("lavaan", quietly = TRUE) || !requireNamespace("brms", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("insight", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } ``` This vignette demonstrates the `mediation()`-function. Before we start, we fit some models, including a mediation-object from the _mediation_-package and a structural equation modelling approach with the _lavaan_-package, both of which we use for comparison with _brms_ and _rstanarm_. ## Mediation Analysis in brms and rstanarm ```{r} library(bayestestR) library(mediation) library(brms) library(rstanarm) # load sample data data(jobs) set.seed(123) # linear models, for mediation analysis b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) # mediation analysis, for comparison with brms m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek") ``` ```{r eval=FALSE} # Fit Bayesian mediation model in brms f1 <- bf(job_seek ~ treat + econ_hard + sex + age) f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age) m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4) ``` ```{r echo=FALSE} m2 <- insight::download_model("brms_mv_6") ``` ```{r eval=FALSE} # Fit Bayesian mediation model in rstanarm m3 <- stan_mvmer( list(job_seek ~ treat + econ_hard + sex + age + (1 | occp), depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp)), data = jobs, cores = 4, refresh = 0 ) ``` ```{r echo=FALSE} m3 <- insight::download_model("stanmvreg_2") ``` `mediation()` is a summary function, especially for mediation analysis, i.e. for multivariate response models with causal mediation effects. In the models `m2` and `m3`, `treat` is the treatment effect and `job_seek` is the mediator effect. For the *brms* model (`m2`), `f1` describes the mediator model and `f2` describes the outcome model. This is similar for the *rstanarm* model.
`mediation()` returns a data frame with information on the _direct effect_ (median value of posterior samples from treatment of the outcome model), _mediator effect_ (median value of posterior samples from mediator of the outcome model), _indirect effect_ (median value of the multiplication of the posterior samples from mediator of the outcome model and the posterior samples from treatment of the mediation model) and the _total effect_ (median value of sums of posterior samples used for the direct and indirect effect). The _proportion mediated_ is the indirect effect divided by the total effect. The simplest call just needs the model-object. ```{r, message=TRUE} # for brms mediation(m2) # for rstanarm mediation(m3) ``` Typically, `mediation()` finds the treatment and mediator variables automatically. If this does not work, use the `treatment` and `mediator` arguments to specify the related variable names. For all values, the 89% credible intervals are calculated by default. Use `ci` to calculate a different interval. ## Comparison to the mediation package Here is a comparison with the _mediation_ package. Note that the `summary()`-output of the _mediation_ package shows the indirect effect first, followed by the direct effect. ```{r} summary(m1) mediation(m2, ci = .95) mediation(m3, ci = .95) ``` If you want to calculate mean instead of median values from the posterior samples, use the `centrality`-argument. Furthermore, there is a `print()`-method, which allows printing more digits. ```{r, message=TRUE} m <- mediation(m2, centrality = "mean", ci = .95) print(m, digits = 4) ``` As you can see, the results are similar to what the _mediation_ package produces for non-Bayesian models. ## Comparison to SEM from the lavaan package Finally, we also compare the results to a SEM model, using *lavaan*. This example should demonstrate how to "translate" the same model across different packages or modeling approaches.
```{r} library(lavaan) data(jobs) set.seed(1234) model <- ' # direct effects depress2 ~ c1*treat + c2*econ_hard + c3*sex + c4*age + b*job_seek # mediation job_seek ~ a1*treat + a2*econ_hard + a3*sex + a4*age # indirect effects (a*b) indirect_treat := a1*b indirect_econ_hard := a2*b indirect_sex := a3*b indirect_age := a4*b # total effects total_treat := c1 + (a1*b) total_econ_hard := c2 + (a2*b) total_sex := c3 + (a3*b) total_age := c4 + (a4*b) ' m4 <- sem(model, data = jobs) summary(m4) # just to have the numbers right at hand and you don't need to scroll up mediation(m2, ci = .95) ``` The summary output from *lavaan* is longer, but we can find the related numbers quite easily: - the _direct effect_ of treatment is `treat (c1)`, which is `-0.040` - the _indirect effect_ of treatment is `indirect_treat`, which is `-0.016` - the _mediator effect_ of job_seek is `job_seek (b)`, which is `-0.240` - the _total effect_ is `total_treat`, which is `-0.056` bayestestR/inst/doc/probability_of_direction.R0000644000175000017500000001675214135670777021476 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("KernSmooth", quietly = TRUE) || !requireNamespace("GGally", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = "") knitr::opts_chunk$set(comment = ">") options(digits = 2) set.seed(333) ## ----message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- library(dplyr) library(tidyr) library(ggplot2) library(see) read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>% mutate( effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"), p_direction = p_direction * 100 ) %>% ggplot(aes(x = p_direction, y = p_value, color = 
effect_existence)) + geom_point2(alpha = 0.1) + geom_segment(aes(x = 95, y = Inf, xend = 95, yend = 0.1), color = "black", linetype = "longdash") + geom_segment(aes(x = -Inf, y = 0.1, xend = 95, yend = 0.1), color = "black", linetype = "longdash") + geom_segment(aes(x = 97.5, y = Inf, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") + geom_segment(aes(x = -Inf, y = 0.05, xend = 97.5, yend = 0.05), color = "black", linetype = "dashed") + theme_modern() + scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits = 2))) + scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) + scale_color_manual(values = c("Presence of true effect" = "green", "Absence of true effect" = "red")) + theme(legend.title = element_blank()) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab("Probability of Direction (pd)") + ylab("Frequentist p-value") ## ----message=FALSE, warning=FALSE, fig.align='center'------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- library(bayestestR) library(logspline) library(KernSmooth) # Compute the correlations data <- data.frame() for (the_mean in runif(25, 0, 4)) { for (the_sd in runif(25, 0.5, 4)) { x <- rnorm(100, the_mean, abs(the_sd)) data <- rbind( data, data.frame( "direct" = pd(x), "kernel" = pd(x, method = "kernel"), "logspline" = pd(x, method = "logspline"), "KernSmooth" = pd(x, method = "KernSmooth") ) ) } } data <- as.data.frame(sapply(data, as.numeric)) # Visualize the correlations library(ggplot2) library(GGally) GGally::ggpairs(data) + theme_classic() ## ----message=FALSE, warning=FALSE--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- data <- data.frame() for (i in 1:25) { the_mean <- runif(1, 0, 4) the_sd <- abs(runif(1, 0.5, 4)) parent_distribution <- rnorm(100000, the_mean, the_sd) true_pd <- pd(parent_distribution) for (j in 1:25) { sample_size <- round(runif(1, 25, 5000)) subsample <- sample(parent_distribution, sample_size) data <- rbind( data, data.frame( "sample_size" = sample_size, "true" = true_pd, "direct" = pd(subsample) - true_pd, "kernel" = pd(subsample, method = "kernel") - true_pd, "logspline" = 
pd(subsample, method = "logspline") - true_pd, "KernSmooth" = pd(subsample, method = "KernSmooth") - true_pd ) ) } } data <- as.data.frame(sapply(data, as.numeric)) ## ----message=FALSE, warning=FALSE, fig.align='center'------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- library(tidyr) library(dplyr) data %>% tidyr::gather(Method, Distance, -sample_size, -true) %>% ggplot(aes(x = sample_size, y = Distance, color = Method, fill = Method)) + geom_point(alpha = 0.3, stroke = 0, shape = 16) + geom_smooth(alpha = 0.2) + geom_hline(yintercept = 0) + theme_classic() + xlab("\nDistribution Size") bayestestR/inst/doc/example3.R0000644000175000017500000000075514135670666016141 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set( comment = ">", message = FALSE, warning = FALSE, out.width = "100%" ) options(digits=2) set.seed(333) ## ----echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg") bayestestR/inst/doc/bayestestR.R0000644000175000017500000000333614135670655016544 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) knitr::opts_chunk$set(comment = ">") options(knitr.kable.NA = "") options(digits = 2) if (!requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } else { library(rstanarm) library(bayestestR) } ## ----echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg") ## ----eval=FALSE, message=FALSE, warning=FALSE--------------------------------- # install.packages("remotes") # remotes::install_github("easystats/easystats") ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # install.packages("rstanarm") # library(rstanarm) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- lm(Sepal.Length ~ Petal.Length, data = iris) # summary(model) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA--------------------- model <- lm(Sepal.Length ~ Petal.Length, data = iris) summary(model) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris) # posteriors <- describe_posterior(model) # # for a nicer table # print_md(posteriors, digits = 2) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA--------------------- set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, refresh = 0) 
posteriors <- describe_posterior(model) # for a nicer table print_md(posteriors, digits = 2) bayestestR/inst/doc/example2.html0000644000175000017500000127051214135670666016704 0ustar nileshnilesh 2. Confirmation of Bayesian skills

2. Confirmation of Bayesian skills

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Now that describing and understanding posterior distributions of linear regressions is not that mysterious to you, we will take one step back and study some simpler models: correlations and t-tests.

But before we do that, let us take a moment to remind ourselves and appreciate the fact that all basic statistical procedures such as correlations, t-tests, ANOVAs, or chi-square tests are linear regressions (we strongly recommend this excellent demonstration). Nevertheless, these simple models will provide a good pretext to introduce a few more complex indices, such as the Bayes factor.
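As a quick illustration of this equivalence (an editorial aside using the built-in sleep dataset, not part of the original examples), a two-sample t-test with equal variances and a linear regression with a binary predictor give the same test statistic (up to sign) and p-value:

t.test(extra ~ group, data = sleep, var.equal = TRUE)
summary(lm(extra ~ group, data = sleep)) # the group coefficient's t and p match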

Correlations

Frequentist version

Once again, let us begin with a frequentist correlation between two continuous variables, the width and the length of the sepals of some flowers. The data is available in R as the iris dataset (the same that was used in the previous tutorial).

We will compute a Pearson’s correlation test, store the results in an object called result, and then display it:

result <- cor.test(iris$Sepal.Width, iris$Sepal.Length)
result
> 
>   Pearson's product-moment correlation
> 
> data:  iris$Sepal.Width and iris$Sepal.Length
> t = -1, df = 148, p-value = 0.2
> alternative hypothesis: true correlation is not equal to 0
> 95 percent confidence interval:
>  -0.273  0.044
> sample estimates:
>   cor 
> -0.12

As you can see in the output, the test actually compared two hypotheses:

  • the null hypothesis (h0; no correlation),
  • the alternative hypothesis (h1; a non-null correlation).

Based on the p-value, the null hypothesis cannot be rejected: the correlation between the two variables is negative but non-significant (\(r = -.12, p > .05\)).

Bayesian correlation

To compute a Bayesian correlation test, we will need the BayesFactor package (you can install it by running install.packages("BayesFactor")). We can then load this package, compute the correlation using the correlationBF() function, and store the result.

library(BayesFactor)
result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length)

Now, let us run our describe_posterior() function on that:

describe_posterior(result)
> Summary of Posterior Distribution
> 
> Parameter | Median |        95% CI |     pd |          ROPE | % in ROPE |    BF |         Prior
> -----------------------------------------------------------------------------------------------
> rho       |  -0.11 | [-0.26, 0.05] | 92.25% | [-0.05, 0.05] |    20.84% | 0.509 | Beta (3 +- 3)

We see again many things here, but the important index for now is the median of the posterior distribution, -.11. This is (again) quite close to the frequentist correlation. We could, as previously, describe the credible interval, the pd or the ROPE percentage, but we will focus here on another index provided by the Bayesian framework, the Bayes Factor (BF).

Bayes Factor (BF)

We said previously that a correlation test actually compares two hypotheses: a null (absence of effect) and an alternative one (presence of an effect). The Bayes factor (BF) allows the same comparison and determines under which of these two models the observed data are more probable: a model with the effect of interest, and a null model without the effect of interest. So, in the context of our correlation example, the null hypothesis would be no correlation between the two variables (\(h0: \rho = 0\); where \(\rho\) stands for the Bayesian correlation coefficient), while the alternative hypothesis would be that there is a correlation different from 0 - positive or negative (\(h1: \rho \neq 0\)).

We can use bayesfactor() to specifically compute the Bayes factor comparing those models:

bayesfactor(result)
> Bayes Factors for Model Comparison
> 
>     Model         BF
> [2] (rho != 0) 0.509
> 
> * Against Denominator: [1] (rho = 0)
> *   Bayes Factor Type: JZS (BayesFactor)

We got a BF of 0.51. What does it mean?

Bayes factors are continuous measures of relative evidence, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as the numerator), and a Bayes factor smaller than 1 giving evidence in favour of the other model (the denominator).

Yes, you heard that right, evidence in favour of the null!

That’s one of the reasons why the Bayesian framework is sometimes considered superior to the frequentist framework. Remember from your stats lessons that the p-value can only be used to reject h0, but not to accept it. With the Bayes factor, you can measure evidence against - and in favour of - the null. In other words, in the frequentist framework, if the p-value is not significant, we can conclude that evidence for the effect is absent, but not that there is evidence for the absence of the effect. In the Bayesian framework, we can do the latter. This is important since sometimes our hypotheses are about no effect.

BFs representing evidence for the alternative against the null can be reversed using \(BF_{01}=1/BF_{10}\) (the 01 and 10 correspond to h0 against h1 and h1 against h0, respectively) to provide evidence of the null against the alternative. This improves human readability1 in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null).
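As a quick arithmetic aside (an editorial sketch), this reversal is just a reciprocal:

BF10 <- 0.51     # evidence for the alternative over the null
BF01 <- 1 / BF10 # evidence for the null over the alternative
BF01             # ~2: the data are about twice as probable under the null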

In our case, BF = 1/0.51 ≈ 2 indicates that the data are 2 times more probable under the null compared to the alternative hypothesis. Though this favours the null, it is considered only anecdotal evidence in favour of the null.

We can thus conclude that there is anecdotal evidence in favour of an absence of correlation between the two variables (rmedian = -0.11, BF = 0.51), which is a much more informative statement than what we can do with frequentist statistics.

And that’s not all!

Visualise the Bayes factor

In general, pie charts are an absolute no-go in data visualisation, as our brain’s perceptive system heavily distorts the information presented in such a way2. Nevertheless, there is one exception: pizza charts.

It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise.


Wagenmakers’ pizza poking analogy. From the great <www.bayesianspectacles.org> blog.

Such “pizza plots” can be directly created through the see visualisation companion package for easystats (you can install it by running install.packages("see")):

library(see)

plot(bayesfactor(result)) +
  scale_fill_pizza()

So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke?

t-tests

“I know that I know nothing, and especially not if versicolor and virginica differ in terms of their Sepal.Width” - Socrates.

Time to finally answer this crucial question!

Versicolor vs. virginica

Bayesian t-tests can be performed in a very similar way to correlations. We are particularly interested in two levels of the Species factor: versicolor and virginica. We will start by filtering out from iris the non-relevant observations corresponding to the setosa species, and we will then visualise the observations and the distribution of the Sepal.Width variable.

library(dplyr)
library(ggplot2)

# Select only two relevant species
data <- iris %>%
  filter(Species != "setosa") %>%
  droplevels()

# Visualise distributions and observations
data %>%
  ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) +
  geom_violindot(fill_dots = "black", size_dots = 1) +
  scale_fill_material() +
  theme_modern()

It seems (visually) that virginica flowers have, on average, a slightly higher sepal width. Let’s assess this difference statistically by using the ttestBF() function in the BayesFactor package.

Compute the Bayesian t-test

result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data)
describe_posterior(result)
> Summary of Posterior Distribution
> 
> Parameter  | Median |         95% CI |     pd |          ROPE | % in ROPE |    BF |              Prior
> ------------------------------------------------------------------------------------------------------
> Difference |  -0.19 | [-0.31, -0.06] | 99.75% | [-0.03, 0.03] |        0% | 17.72 | Cauchy (0 +- 0.71)

From the indices, we can say that the difference of Sepal.Width between virginica and versicolor has a probability of 99.75% of being negative [from the pd and the sign of the median] (median = -0.19, 95% CI [-0.31, -0.06]). The data provide strong evidence against the null hypothesis (BF = 17.72).

Keep that in mind as we will see another way of investigating this question.

Logistic Model

A hypothesis for which one uses a t-test can also be tested using a binomial model (e.g., a logistic model). Indeed, it is possible to reformulate the following hypothesis, “there is an important difference in this variable between the two groups” with the hypothesis “this variable is able to discriminate between (or classify) the two groups.” However, these models are much more powerful than a t-test.

In the case of the difference of Sepal.Width between virginica and versicolor, the question becomes: how well can we classify the two species using only Sepal.Width?

Fit the model

library(rstanarm)

model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0)

Visualise the model

Using the modelbased package.

library(modelbased)

vizdata <- estimate_relation(model)

ggplot(vizdata, aes(x = Sepal.Width, y = Predicted)) +
  geom_ribbon(aes(ymin = CI_low, ymax = CI_high), alpha = 0.5) +
  geom_line() + 
  ylab("Probability of being virginica") +
  theme_modern()

Performance and Parameters

Once again, we can extract all indices of interest for the posterior distribution using our old pal describe_posterior().

describe_posterior(model, test = c("pd", "ROPE", "BF"))
> Summary of Posterior Distribution
> 
> Parameter   | Median |          95% CI |     pd |          ROPE | % in ROPE |  Rhat |     ESS |    BF
> -----------------------------------------------------------------------------------------------------
> (Intercept) |  -6.15 | [-10.26, -2.13] | 99.92% | [-0.18, 0.18] |        0% | 1.001 | 2651.00 |  7.23
> Sepal.Width |   2.13 | [  0.77,  3.60] | 99.95% | [-0.18, 0.18] |        0% | 1.001 | 2639.00 | 20.31
library(performance)

model_performance(model)
> # Indices of model performance
> 
> ELPD    | ELPD_SE |   LOOIC | LOOIC_SE |    WAIC |    R2 |  RMSE | Sigma | Log_loss | Score_log | Score_spherical
> -----------------------------------------------------------------------------------------------------------------
> -66.265 |   3.071 | 132.530 |    6.142 | 132.519 | 0.099 | 0.477 | 1.000 |    0.643 |   -34.666 |           0.014

Visualise the indices

TO DO.

library(see)

plot(rope(result))

Diagnostic Indices

About diagnostic indices such as Rhat and ESS.
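As a starting point (an editorial sketch, assuming the logistic model fitted above), these diagnostics can be extracted with bayestestR:

diagnostic_posterior(model) # returns Rhat and ESS for each parameter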


  1. If the effect is really strong, the BF values can be extremely high. So don’t be surprised if you see BF values that have been log-transformed to make them more human readable.↩︎

  2. An exception would be when the pie slices are well-labeled so that our brain’s perception system does not have to do the decoding work.↩︎

bayestestR/inst/doc/example3.Rmd0000644000175000017500000000347714054321205016444 0ustar nileshnilesh--- title: "3. Become a Bayesian master" output: rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{3. Become a Bayesian master} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set( comment = ">", message = FALSE, warning = FALSE, out.width = "100%" ) options(digits=2) set.seed(333) ``` ```{r echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg") ``` ## Mixed Models TO BE CONTINUED. ### Priors TO BE CONTINUED. ## What's next? The journey to become a true Bayesian master is not yet over. It is merely the beginning. It is now time to leave the `bayestestR` universe and apply the Bayesian framework in a variety of other statistical contexts: - [**Marginal means**](https://easystats.github.io/modelbased/articles/estimate_means.html) - [**Contrast analysis**](https://easystats.github.io/modelbased/articles/estimate_contrasts.html) - [**Testing Contrasts from Bayesian Models with 'emmeans' and 'bayestestR'**](https://easystats.github.io/blog/posts/bayestestr_emmeans/) bayestestR/inst/doc/indicesExistenceComparison.R0000644000175000017500000000065314135670667021742 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">", dpi=75, out.width = "100%") options(digits=2) ## ---- echo=FALSE-------------------------------------------------------------- knitr::include_graphics("https://www.frontiersin.org/files/Articles/498833/fpsyg-10-02767-HTML/image_m/fpsyg-10-02767-t003.jpg") bayestestR/inst/doc/example1.Rmd0000644000175000017500000004670214133140641016441 0ustar nileshnilesh--- title: "1. Initiation to Bayesian models" output: rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{1. Initiation to Bayesian models} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. 
https://doi.org/10.21105/joss.01541 --- ```{r , include=FALSE} library(knitr) library(insight) options(knitr.kable.NA = "") knitr::opts_chunk$set( comment = ">", message = FALSE, warning = FALSE, out.width = "100%" ) options(digits = 2) set.seed(333) if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } format_percent <- function(x, digits = 0, ...) { paste0(format_value(x * 100, digits = digits, ...), "%") } ``` Now that you've read the [**Get started**](https://easystats.github.io/bayestestR/articles/bayestestR.html) section, let's dive into the **subtleties of Bayesian modelling using R**. ## Loading the packages Once you've [installed](https://easystats.github.io/bayestestR/articles/bayestestR.html#bayestestr-installation) the necessary packages, we can load `rstanarm` (to fit the models), `bayestestR` (to compute useful indices), and `insight` (to access the parameters). ```{r } library(rstanarm) library(bayestestR) library(insight) ``` ## Simple linear (regression) model We will begin by conducting a simple linear regression to test the relationship between `Petal.Length` (our predictor, or *independent*, variable) and `Sepal.Length` (our response, or *dependent*, variable) from the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, which is included by default in R. ### Fitting the model Let's start by fitting a **frequentist** version of the model, just to have a reference point: ```{r } model <- lm(Sepal.Length ~ Petal.Length, data = iris) summary(model) ``` We can also zoom in on the parameters of interest to us: ```{r} insight::get_parameters(model) ``` In this model, the linear relationship between `Petal.Length` and `Sepal.Length` is **positive and significant** ($\beta = 0.41, t(148) = 21.6, p < .001$). This means that for each one-unit increase in `Petal.Length` (the predictor), you can expect `Sepal.Length` (the response) to increase by **0.41**. This effect can be visualized by plotting the predictor values on the `x` axis and the response values as `y` using the `ggplot2` package: ```{r } library(ggplot2) # Load the package # The ggplot function takes the data as argument, and then the variables # related to aesthetic features such as the x and y axes. ggplot(iris, aes(x = Petal.Length, y = Sepal.Length)) + geom_point() + # This adds the points geom_smooth(method = "lm") # This adds a regression line ``` Now let's fit a **Bayesian version** of the model by using the `stan_glm` function in the `rstanarm` package: ```{r , eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris) ``` ```{r echo=FALSE, comment=NA, results='hide'} library(rstanarm) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris) ``` You can see the sampling algorithm being run. ### Extracting the posterior Once it is done, let us extract the parameters (*i.e.*, coefficients) of the model. ```{r , eval=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` ```{r , echo=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` As we can see, the parameters take the form of a lengthy dataframe with two columns, corresponding to the `intercept` and the effect of `Petal.Length`. These columns contain the **posterior distributions** of these two parameters.
In simple terms, the posterior distribution is a set of different plausible values for each parameter. Contrast this with the result we saw from the frequentist linear regression model using `lm`, where the results had **single values** for each effect of the model, and not a distribution of values. This is one of the most important differences between these two frameworks. #### About posterior draws Let's look at the length of the posteriors. ```{r } nrow(posteriors) # Size (number of rows) ``` **Why is the size 4000, and not more or less?** First of all, these observations (the rows) are usually referred to as **posterior draws**. The underlying idea is that the Bayesian sampling algorithm (*e.g.*, **Markov Chain Monte Carlo - MCMC**) will *draw* from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. **Therefore, the more draws you have, the better your estimation of the posterior distribution**. However, more draws also mean longer computation times. If we look at the documentation (`?sampling`) for `rstanarm`'s `"sampling"` algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are **4** `chains` (you can see them as distinct sampling runs), that each create **2000** `iter` (draws). However, only half of these iterations are kept, as half are used for `warm-up` (the convergence of the algorithm). Thus, the total number of posterior draws equals **`4 chains * (2000 iterations - 1000 warm-up) = 4000`**. We can change that, for instance: ```{r , eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, chains = 2, iter = 1000, warmup = 250) nrow(insight::get_parameters(model)) # Size (number of rows) ``` ```{r echo=FALSE, comment=NA} model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, chains = 2, iter = 1000, warmup = 250, refresh = 0) nrow(insight::get_parameters(model)) # Size (number of rows) ``` In this case, as would be expected, we have **`2 chains * (1000 iterations - 250 warm-up) = 1500`** posterior draws. But let's keep our first model with the default setup (as it has more draws). #### Visualizing the posterior distribution Now that we've understood where these values come from, let's look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of `Petal.Length`. ```{r } ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") ``` This distribution represents the [probability](https://en.wikipedia.org/wiki/Probability_density_function) (the `y` axis) of different effects (the `x` axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about **0.35 to 0.50**, with the bulk of it being at around **0.41**. > **Congrats! You've just described your first posterior distribution.** And this is the heart of Bayesian analysis. We don't need *p*-values, *t*-values, or degrees of freedom. **Everything we need is contained within this posterior distribution**. Our description above is consistent with the values obtained from the frequentist regression (which resulted in a $\beta$ of **0.41**). This is reassuring! Indeed, **in most cases, Bayesian analysis does not drastically differ from the frequentist results** or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe.
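We can check this consistency directly (a small editorial sketch; `posteriors` is the data frame extracted above):

```{r, eval=FALSE}
# The posterior median is very close to the frequentist point estimate
median(posteriors$Petal.Length)
coef(lm(Sepal.Length ~ Petal.Length, data = iris))["Petal.Length"]
```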
We can now go ahead and **precisely characterize** this posterior distribution. ### Describing the Posterior Unfortunately, it is often not practical to report whole posterior distributions as graphs. We need to find a **concise way to summarize it**. We recommend describing the posterior distribution with **3 elements**: 1. A **point-estimate**, which is a one-value summary (similar to the $\beta$ in frequentist regressions). 2. A **credible interval** representing the associated uncertainty. 3. Some **indices of significance**, giving information about the relative importance of this effect. #### Point-estimate **What single value can best represent my posterior distribution?** Centrality indices, such as the *mean*, the *median*, or the *mode* are usually used as point-estimates. But what's the difference between them? Let's answer this by first inspecting the **mean**: ```{r } mean(posteriors$Petal.Length) ``` This is close to the frequentist $\beta$. But, as we know, the mean is quite sensitive to outliers or extreme values. Maybe the **median** could be more robust? ```{r } median(posteriors$Petal.Length) ``` Well, this is **very close to the mean** (and identical when rounding the values). Maybe we could take the **mode**, that is, the *peak* of the posterior distribution? In the Bayesian framework, this value is called the **Maximum A Posteriori (MAP)**. Let's see: ```{r } map_estimate(posteriors$Petal.Length) ``` **They are all very close!** Let's visualize these values on the posterior distribution: ```{r } ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") + # The mean in blue geom_vline(xintercept = mean(posteriors$Petal.Length), color = "blue", size = 1) + # The median in red geom_vline(xintercept = median(posteriors$Petal.Length), color = "red", size = 1) + # The MAP in purple geom_vline(xintercept = map_estimate(posteriors$Petal.Length), color = "purple", size = 1) ``` Well, all these values give very similar results. Thus, **we will choose the median**, as this value has a direct meaning from a probabilistic perspective: **there is 50\% chance that the true effect is higher and 50\% chance that the effect is lower** (as it divides the distribution in two equal parts). #### Uncertainty Now that we have a point-estimate, we have to **describe the uncertainty**. We could compute the range: ```{r } range(posteriors$Petal.Length) ``` But does it make sense to include all these extreme values? Probably not. Thus, we will compute a [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html). Long story short, it's kind of similar to a frequentist **confidence interval**, but easier to interpret and easier to compute — *and it makes more sense*. We will compute this **credible interval** based on the [Highest Density Interval (HDI)](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis). It will give us the range containing the 89\% most probable effect values. **Note that we will use 89\% CIs instead of 95\%** CIs (as in the frequentist framework), as the 89\% level gives more [stable results](https://easystats.github.io/bayestestR/articles/credible_interval.html#why-is-the-default-89) [@kruschke2014doing] and reminds us about the arbitrariness of such conventions [@mcelreath2018statistical]. ```{r } hdi(posteriors$Petal.Length, ci = 0.89) ``` Nice, so we can conclude that **the effect has 89\% chance of falling within the `[0.38, 0.44]` range**.
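For comparison (an editorial aside, not part of the original walkthrough), `bayestestR` also provides the equal-tailed interval via `eti()`; for a roughly symmetric posterior such as this one, it gives a very similar range:

```{r, eval=FALSE}
eti(posteriors$Petal.Length, ci = 0.89) # equal-tailed credible interval
```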
We have just computed the two most important pieces of information for describing our effects. #### Effect significance However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know if this effect has significance in practical or statistical terms, or in other words, whether the effect is **important**. For instance, is the effect different from 0? So how do we **assess the *significance* of an effect**? How can we do this? Well, in this particular case, it is very eloquent: **all possible effect values (*i.e.*, the whole posterior distribution) are positive and over 0.35, which is already substantial evidence that the effect is not zero**. But still, we want some objective decision criterion, to say whether **the effect is 'significant' or not**. One approach, similar to the frequentist framework, would be to see if the **Credible Interval** contains 0. If it is not the case, that would mean that our **effect is 'significant'**. But this index is not very fine-grained, no? **Can we do better? Yes!** ## A linear model with a categorical predictor Imagine for a moment you are interested in how the weight of chickens varies depending on two different **feed types**. For this example, we will start by selecting from the `chickwts` dataset (available in base R) two feed types of interest for us (*we do have peculiar interests*): **meat meals** and **sunflowers**. ### Data preparation and model fitting ```{r } library(dplyr) # We keep only rows for which feed is meatmeal or sunflower data <- filter(chickwts, feed %in% c("meatmeal", "sunflower")) ``` Let's run another Bayesian regression to predict the **weight** with the **two feed types**. ```{r , eval=FALSE} model <- stan_glm(weight ~ feed, data = data) ``` ```{r echo=FALSE, comment=NA, results='hide'} model <- stan_glm(weight ~ feed, data = data) ``` ### Posterior description ```{r } posteriors <- insight::get_parameters(model) ggplot(posteriors, aes(x = feedsunflower)) + geom_density(fill = "red") ``` This represents the **posterior distribution of the difference** between `meatmeal` and `sunflowers`. It seems that the difference is **positive** (since the values are concentrated on the right side of 0). Eating sunflowers makes you fatter (*at least, if you're a chicken*). But, **by how much?** Let us compute the **median** and the **CI**: ```{r } median(posteriors$feedsunflower) hdi(posteriors$feedsunflower) ``` It makes you fat by around 51 grams (the median). However, the uncertainty is quite high: **there is 89\% chance that the difference between the two feed types is between 14 and 91.** > **Is this effect different from 0?** ### ROPE Percentage Testing whether this distribution is different from 0 doesn't make sense, as 0 is a single value (*and the probability that any distribution is different from a single value is infinite*). However, one way to assess **significance** could be to define an area *around* 0, which we will consider as *practically equivalent* to zero (*i.e.*, absence of, or a negligible, effect). This is called the [**Region of Practical Equivalence (ROPE)**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), and is one way of testing the significance of parameters. **How can we define this region?** > ***Driing driiiing*** -- ***The easystats team speaking. How can we help?*** -- ***I am Prof. Sanders. An expert in chicks... I mean chickens.
Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.*** Well, that's convenient. Now we know that we can define the ROPE as the `[-20, 20]` range. All effects within this range are considered as *null* (negligible). We can now compute the **proportion of the 89\% most probable values (the 89\% CI) which are not null**, *i.e.*, which are outside this range. ```{r } rope(posteriors$feedsunflower, range = c(-20, 20), ci = 0.89) ``` **5\% of the 89\% CI can be considered as null**. Is that a lot? Based on our [**guidelines**](https://easystats.github.io/bayestestR/articles/guidelines.html), yes, it is too much. **Based on this particular definition of ROPE**, we conclude that this effect is not significant (the probability of being negligible is too high). That said, to be honest, I have **some doubts about this Prof. Sanders**. I don't really trust **his definition of ROPE**. Is there a more **objective** way of defining it? ```{r echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.jpg") ``` **Yes!** One common practice is, for instance, to use the **tenth (`1/10 = 0.1`) of the standard deviation (SD)** of the response variable, which can be considered as a "negligible" effect size [@cohen1988statistical]. ```{r } rope_value <- 0.1 * sd(data$weight) rope_range <- c(-rope_value, rope_value) rope_range ``` Let's redefine our ROPE as the region within the `[-6.2, 6.2]` range. **Note that this can be directly obtained by the `rope_range` function :)** ```{r } rope_value <- rope_range(model) rope_value ``` Let's recompute the **percentage in ROPE**: ```{r } rope(posteriors$feedsunflower, range = rope_range, ci = 0.89) ``` With this reasonable definition of ROPE, we observe that the 89\% CI of the effect does **not** overlap with the ROPE. Thus, we can conclude that **the effect is significant** (in the sense of *important* enough to be noted). ### Probability of Direction (pd) Maybe we are not interested in whether the effect is non-negligible. Maybe **we just want to know if this effect is positive or negative**. In this case, we can simply compute the proportion of the posterior that is positive, no matter the "size" of the effect. ```{r } n_positive <- posteriors %>% filter(feedsunflower > 0) %>% # select only positive values nrow() # Get length n_positive / nrow(posteriors) * 100 ``` We can conclude that **the effect is positive with a probability of 98\%**. We call this index the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html). It can, in fact, be computed more easily with the following: ```{r } p_direction(posteriors$feedsunflower) ``` Interestingly, it so happens that **this index is usually highly correlated with the frequentist *p*-value**. We could almost roughly infer the corresponding *p*-value with a simple transformation: ```{r , eval=TRUE} pd <- 97.82 onesided_p <- 1 - pd / 100 twosided_p <- onesided_p * 2 twosided_p ``` If we ran our model in the frequentist framework, we should approximately observe an effect with a *p*-value of `r round(twosided_p, digits=3)`.
That said, to be honest, I have **some doubts about this Prof. Sanders**. I don't really trust **his definition of ROPE**. Is there a more **objective** way of defining it?

```{r echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"}
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.jpg")
```

**Yes!** One common practice is, for instance, to use **one tenth (`1/10 = 0.1`) of the standard deviation (SD)** of the response variable, which can be considered as a "negligible" effect size [@cohen1988statistical].

```{r }
rope_value <- 0.1 * sd(data$weight)
rope_range <- c(-rope_value, rope_value)
rope_range
```

Let's redefine our ROPE as the region within the `[-6.2, 6.2]` range. **Note that this can be directly obtained with the `rope_range` function :)**

```{r }
rope_value <- rope_range(model)
rope_value
```

Let's recompute the **percentage in ROPE**:

```{r }
rope(posteriors$feedsunflower, range = rope_range, ci = 0.89)
```

With this more objective definition of the ROPE, we observe that the 89\% CI of the effect does **not** overlap with the ROPE. Thus, we can conclude that **the effect is significant** (in the sense of being *important* enough to be noted).

### Probability of Direction (pd)

Maybe we are not interested in whether the effect is non-negligible. Maybe **we just want to know if this effect is positive or negative**. In this case, we can simply compute the proportion of the posterior that is positive, no matter the "size" of the effect.

```{r }
n_positive <- posteriors %>%
  filter(feedsunflower > 0) %>% # select only positive values
  nrow() # Get length

n_positive / nrow(posteriors) * 100
```

We can conclude that **the effect is positive with a probability of 98\%**. We call this index the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html). It can, in fact, be computed more easily with the following:

```{r }
p_direction(posteriors$feedsunflower)
```

Interestingly, it so happens that **this index is usually highly correlated with the frequentist *p*-value**. We can even roughly infer the corresponding *p*-value with a simple transformation:

```{r , eval=TRUE}
pd <- 97.82
onesided_p <- 1 - pd / 100
twosided_p <- onesided_p * 2
twosided_p
```
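As a side note, bayestestR also exports a small helper, `pd_to_p()`, that performs this very conversion; a minimal sketch of its use (we assume here that the pd is supplied as a proportion rather than a percentage -- check your version's documentation):

```{r , eval=FALSE}
# pd -> two-sided p via the exported helper (assumed usage)
pd_to_p(0.9782) # should give roughly 0.044, i.e. 2 * (1 - pd)
```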
If we ran our model in the frequentist framework, we should approximately observe an effect with a *p*-value of `r round(twosided_p, digits=3)`.

**Is that true?**

#### Comparison to frequentist

```{r }
summary(lm(weight ~ feed, data = data))
```

The frequentist model tells us that the difference is **positive and significant** ($\beta = 52, p = 0.04$).

**Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.**

## All with one function

And yet, I agree, it was a bit **tedious** to extract and compute all the indices. **But what if I told you that we can do all of this, and more, with only one function?**

> **Behold, `describe_posterior`!**

This function computes all of the aforementioned indices, and can be run directly on the model:

```{r }
describe_posterior(model, test = c("p_direction", "rope", "bayesfactor"))
```

**Tada!** There we have it! The **median**, the **CI**, the **pd** and the **ROPE percentage**!

Understanding and describing posterior distributions is just one aspect of Bayesian modelling. **Are you ready for more?!** [**Click here**](https://easystats.github.io/bayestestR/articles/example2.html) to see the next example.

## References

bayestestR/inst/doc/example1.R0000644000175000017500000001464014135670662016131 0ustar nileshnilesh## ---- include=FALSE-----------------------------------------------------------
library(knitr)
library(insight)

options(knitr.kable.NA = "")
knitr::opts_chunk$set(
  comment = ">",
  message = FALSE,
  warning = FALSE,
  out.width = "100%"
)
options(digits = 2)
set.seed(333)

if (!requireNamespace("rstanarm", quietly = TRUE) ||
  !requireNamespace("dplyr", quietly = TRUE) ||
  !requireNamespace("ggplot2", quietly = TRUE)) {
  knitr::opts_chunk$set(eval = FALSE)
}

format_percent <- function(x, digits = 0, ...) {
  paste0(format_value(x * 100, digits = digits, ...), "%")
}

## -----------------------------------------------------------------------------
library(rstanarm)
library(bayestestR)
library(insight)

## -----------------------------------------------------------------------------
model <- lm(Sepal.Length ~ Petal.Length, data = iris)
summary(model)

## -----------------------------------------------------------------------------
insight::get_parameters(model)

## -----------------------------------------------------------------------------
library(ggplot2) # Load the package

# The ggplot function takes the data as argument, and then the variables
# related to aesthetic features such as the x and y axes.
ggplot(iris, aes(x = Petal.Length, y = Sepal.Length)) +
  geom_point() + # This adds the points
  geom_smooth(method = "lm") # This adds a regression line

## ---- eval=FALSE--------------------------------------------------------------
# model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)

## ----echo=FALSE, comment=NA, results='hide'-----------------------------------
library(rstanarm)
set.seed(333)
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris)

## ---- eval=FALSE--------------------------------------------------------------
# posteriors <- insight::get_parameters(model)
#
# head(posteriors) # Show the first 6 rows

## ---- echo=FALSE---------------------------------------------------------------
posteriors <- insight::get_parameters(model)

head(posteriors) # Show the first 6 rows

## -----------------------------------------------------------------------------
nrow(posteriors) # Size (number of rows)

## ---- eval=FALSE--------------------------------------------------------------
# model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, chains = 2, iter = 1000, warmup = 250)
#
# nrow(insight::get_parameters(model)) # Size (number of rows)

## ----echo=FALSE, comment=NA----------------------------------------------------
model <- stan_glm(Sepal.Length ~ Petal.Length, data = iris, chains = 2, iter = 1000, warmup = 250, refresh = 0)

nrow(insight::get_parameters(model)) # Size (number of rows)

## -----------------------------------------------------------------------------
ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange")

## -----------------------------------------------------------------------------
mean(posteriors$Petal.Length)

## -----------------------------------------------------------------------------
median(posteriors$Petal.Length)

## -----------------------------------------------------------------------------
map_estimate(posteriors$Petal.Length)

## -----------------------------------------------------------------------------
ggplot(posteriors, aes(x = Petal.Length)) +
  geom_density(fill = "orange") +
  # The mean in blue
  geom_vline(xintercept = mean(posteriors$Petal.Length), color = "blue", size = 1) +
  # The median in red
  geom_vline(xintercept = median(posteriors$Petal.Length), color = "red", size = 1) +
  # The MAP in purple
  geom_vline(xintercept = map_estimate(posteriors$Petal.Length), color = "purple", size = 1)

## -----------------------------------------------------------------------------
range(posteriors$Petal.Length)

## -----------------------------------------------------------------------------
hdi(posteriors$Petal.Length, ci = 0.89)

## -----------------------------------------------------------------------------
library(dplyr)

# We keep only rows for which feed is meatmeal or sunflower
data <- filter(chickwts, feed %in% c("meatmeal", "sunflower"))

## ---- eval=FALSE--------------------------------------------------------------
# model <- stan_glm(weight ~ feed, data = data)

## ----echo=FALSE, comment=NA, results='hide'-----------------------------------
model <- stan_glm(weight ~ feed, data = data)

## -----------------------------------------------------------------------------
posteriors <- insight::get_parameters(model)

ggplot(posteriors, aes(x = feedsunflower)) +
  geom_density(fill = "red")

## -----------------------------------------------------------------------------
median(posteriors$feedsunflower)
hdi(posteriors$feedsunflower)

## -----------------------------------------------------------------------------
rope(posteriors$feedsunflower, range = c(-20, 20), ci = 0.89) ## ----echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.jpg") ## ----------------------------------------------------------------------------- rope_value <- 0.1 * sd(data$weight) rope_range <- c(-rope_value, rope_value) rope_range ## ----------------------------------------------------------------------------- rope_value <- rope_range(model) rope_value ## ----------------------------------------------------------------------------- rope(posteriors$feedsunflower, range = rope_range, ci = 0.89) ## ----------------------------------------------------------------------------- n_positive <- posteriors %>% filter(feedsunflower > 0) %>% # select only positive values nrow() # Get length n_positive / nrow(posteriors) * 100 ## ----------------------------------------------------------------------------- p_direction(posteriors$feedsunflower) ## ---- eval=TRUE--------------------------------------------------------------- pd <- 97.82 onesided_p <- 1 - pd / 100 twosided_p <- onesided_p * 2 twosided_p ## ----------------------------------------------------------------------------- summary(lm(weight ~ feed, data = data)) ## ----------------------------------------------------------------------------- describe_posterior(model, test = c("p_direction", "rope", "bayesfactor")) bayestestR/NAMESPACE0000644000175000017500000004176714135670563014001 0ustar nileshnilesh# Generated by roxygen2: do not edit by hand S3method(as.data.frame,bayestestR_mediation) S3method(as.data.frame,density) S3method(as.double,bayesfactor_inclusion) S3method(as.double,bayesfactor_models) S3method(as.double,bayesfactor_parameters) S3method(as.double,bayesfactor_restricted) S3method(as.double,map_estimate) S3method(as.double,p_direction) S3method(as.double,p_map) S3method(as.double,p_rope) S3method(as.double,p_significance) S3method(as.double,rope) S3method(as.list,bayestestR_ci) S3method(as.list,bayestestR_eti) S3method(as.list,bayestestR_hdi) S3method(as.list,bayestestR_si) S3method(as.matrix,bayesfactor_models) S3method(as.numeric,bayesfactor_inclusion) S3method(as.numeric,bayesfactor_models) S3method(as.numeric,bayesfactor_parameters) S3method(as.numeric,bayesfactor_restricted) S3method(as.numeric,map_estimate) S3method(as.numeric,p_direction) S3method(as.numeric,p_map) S3method(as.numeric,p_significance) S3method(bayesfactor_inclusion,BFBayesFactor) S3method(bayesfactor_inclusion,bayesfactor_models) S3method(bayesfactor_models,BFBayesFactor) S3method(bayesfactor_models,blavaan) S3method(bayesfactor_models,brmsfit) S3method(bayesfactor_models,default) S3method(bayesfactor_models,stanreg) S3method(bayesfactor_parameters,bayesfactor_models) S3method(bayesfactor_parameters,blavaan) S3method(bayesfactor_parameters,brmsfit) S3method(bayesfactor_parameters,data.frame) S3method(bayesfactor_parameters,emmGrid) S3method(bayesfactor_parameters,emm_list) S3method(bayesfactor_parameters,numeric) S3method(bayesfactor_parameters,sim) S3method(bayesfactor_parameters,sim.merMod) S3method(bayesfactor_parameters,stanreg) S3method(bayesfactor_restricted,blavaan) S3method(bayesfactor_restricted,brmsfit) S3method(bayesfactor_restricted,data.frame) S3method(bayesfactor_restricted,emmGrid) S3method(bayesfactor_restricted,emm_list) S3method(bayesfactor_restricted,stanreg) 
S3method(bci,BFBayesFactor) S3method(bci,BGGM) S3method(bci,MCMCglmm) S3method(bci,bamlss) S3method(bci,bayesQR) S3method(bci,bcplm) S3method(bci,blavaan) S3method(bci,blrm) S3method(bci,brmsfit) S3method(bci,data.frame) S3method(bci,emmGrid) S3method(bci,emm_list) S3method(bci,get_predicted) S3method(bci,mcmc) S3method(bci,mcmc.list) S3method(bci,numeric) S3method(bci,sim) S3method(bci,sim.merMod) S3method(bci,stanfit) S3method(bci,stanreg) S3method(check_prior,blavaan) S3method(check_prior,brmsfit) S3method(check_prior,stanreg) S3method(ci,BFBayesFactor) S3method(ci,BGGM) S3method(ci,MCMCglmm) S3method(ci,bamlss) S3method(ci,bcplm) S3method(ci,blavaan) S3method(ci,blrm) S3method(ci,brmsfit) S3method(ci,data.frame) S3method(ci,emmGrid) S3method(ci,emm_list) S3method(ci,get_predicted) S3method(ci,mcmc) S3method(ci,mcmc.list) S3method(ci,numeric) S3method(ci,sim) S3method(ci,sim.merMod) S3method(ci,stanfit) S3method(ci,stanreg) S3method(cwi,data.frame) S3method(describe_posterior,BFBayesFactor) S3method(describe_posterior,BGGM) S3method(describe_posterior,MCMCglmm) S3method(describe_posterior,bamlss) S3method(describe_posterior,bayesQR) S3method(describe_posterior,bcplm) S3method(describe_posterior,blavaan) S3method(describe_posterior,blrm) S3method(describe_posterior,brmsfit) S3method(describe_posterior,data.frame) S3method(describe_posterior,double) S3method(describe_posterior,effectsize_std_params) S3method(describe_posterior,emmGrid) S3method(describe_posterior,emm_list) S3method(describe_posterior,get_predicted) S3method(describe_posterior,mcmc) S3method(describe_posterior,mcmc.list) S3method(describe_posterior,numeric) S3method(describe_posterior,sim) S3method(describe_posterior,sim.merMod) S3method(describe_posterior,stanfit) S3method(describe_posterior,stanmvreg) S3method(describe_posterior,stanreg) S3method(describe_prior,BFBayesFactor) S3method(describe_prior,BGGM) S3method(describe_prior,bamlss) S3method(describe_prior,bcplm) S3method(describe_prior,blavaan) S3method(describe_prior,brmsfit) S3method(describe_prior,mcmc.list) S3method(describe_prior,stanreg) S3method(diagnostic_draws,brmsfit) S3method(diagnostic_posterior,BFBayesFactor) S3method(diagnostic_posterior,blavaan) S3method(diagnostic_posterior,brmsfit) S3method(diagnostic_posterior,data.frame) S3method(diagnostic_posterior,numeric) S3method(diagnostic_posterior,stanfit) S3method(diagnostic_posterior,stanmvreg) S3method(diagnostic_posterior,stanreg) S3method(effective_sample,MCMCglmm) S3method(effective_sample,blavaan) S3method(effective_sample,brmsfit) S3method(effective_sample,stanfit) S3method(effective_sample,stanreg) S3method(equivalence_test,BFBayesFactor) S3method(equivalence_test,bamlss) S3method(equivalence_test,bayesQR) S3method(equivalence_test,bcplm) S3method(equivalence_test,blavaan) S3method(equivalence_test,blrm) S3method(equivalence_test,brmsfit) S3method(equivalence_test,data.frame) S3method(equivalence_test,default) S3method(equivalence_test,emmGrid) S3method(equivalence_test,emm_list) S3method(equivalence_test,mcmc) S3method(equivalence_test,mcmc.list) S3method(equivalence_test,numeric) S3method(equivalence_test,sim) S3method(equivalence_test,sim.merMod) S3method(equivalence_test,stanfit) S3method(equivalence_test,stanreg) S3method(estimate_density,BGGM) S3method(estimate_density,MCMCglmm) S3method(estimate_density,bamlss) S3method(estimate_density,bayesQR) S3method(estimate_density,bcplm) S3method(estimate_density,blavaan) S3method(estimate_density,blrm) S3method(estimate_density,brmsfit) 
S3method(estimate_density,data.frame) S3method(estimate_density,emmGrid) S3method(estimate_density,emm_list) S3method(estimate_density,grouped_df) S3method(estimate_density,mcmc) S3method(estimate_density,mcmc.list) S3method(estimate_density,numeric) S3method(estimate_density,stanfit) S3method(estimate_density,stanreg) S3method(eti,BFBayesFactor) S3method(eti,BGGM) S3method(eti,MCMCglmm) S3method(eti,bamlss) S3method(eti,bayesQR) S3method(eti,bcplm) S3method(eti,blavaan) S3method(eti,blrm) S3method(eti,brmsfit) S3method(eti,data.frame) S3method(eti,emmGrid) S3method(eti,emm_list) S3method(eti,get_predicted) S3method(eti,mcmc) S3method(eti,mcmc.list) S3method(eti,numeric) S3method(eti,sim) S3method(eti,sim.merMod) S3method(eti,stanfit) S3method(eti,stanreg) S3method(format,bayesfactor_inclusion) S3method(format,bayesfactor_models) S3method(format,bayesfactor_parameters) S3method(format,bayesfactor_restricted) S3method(format,bayestestR_eti) S3method(format,bayestestR_hdi) S3method(format,bayestestR_si) S3method(format,describe_posterior) S3method(format,map_estimate) S3method(format,p_direction) S3method(format,p_map) S3method(format,p_rope) S3method(format,p_significance) S3method(format,point_estimate) S3method(hdi,BFBayesFactor) S3method(hdi,BGGM) S3method(hdi,MCMCglmm) S3method(hdi,bamlss) S3method(hdi,bayesQR) S3method(hdi,bcplm) S3method(hdi,blavaan) S3method(hdi,blrm) S3method(hdi,brmsfit) S3method(hdi,data.frame) S3method(hdi,emmGrid) S3method(hdi,emm_list) S3method(hdi,get_predicted) S3method(hdi,mcmc) S3method(hdi,mcmc.list) S3method(hdi,numeric) S3method(hdi,sim) S3method(hdi,sim.merMod) S3method(hdi,stanfit) S3method(hdi,stanreg) S3method(map_estimate,BGGM) S3method(map_estimate,bamlss) S3method(map_estimate,bayesQR) S3method(map_estimate,bcplm) S3method(map_estimate,blavaan) S3method(map_estimate,blrm) S3method(map_estimate,brmsfit) S3method(map_estimate,data.frame) S3method(map_estimate,emmGrid) S3method(map_estimate,emm_list) S3method(map_estimate,get_predicted) S3method(map_estimate,mcmc) S3method(map_estimate,mcmc.list) S3method(map_estimate,numeric) S3method(map_estimate,stanfit) S3method(map_estimate,stanreg) S3method(mcse,blavaan) S3method(mcse,brmsfit) S3method(mcse,stanfit) S3method(mcse,stanreg) S3method(mediation,brmsfit) S3method(mediation,stanmvreg) S3method(model_to_priors,brmsfit) S3method(p_direction,BFBayesFactor) S3method(p_direction,BGGM) S3method(p_direction,MCMCglmm) S3method(p_direction,bamlss) S3method(p_direction,bayesQR) S3method(p_direction,bcplm) S3method(p_direction,blavaan) S3method(p_direction,blrm) S3method(p_direction,brmsfit) S3method(p_direction,data.frame) S3method(p_direction,emmGrid) S3method(p_direction,emm_list) S3method(p_direction,get_predicted) S3method(p_direction,mcmc) S3method(p_direction,mcmc.list) S3method(p_direction,numeric) S3method(p_direction,sim) S3method(p_direction,sim.merMod) S3method(p_direction,stanfit) S3method(p_direction,stanreg) S3method(p_map,BFBayesFactor) S3method(p_map,BGGM) S3method(p_map,MCMCglmm) S3method(p_map,bamlss) S3method(p_map,bayesQR) S3method(p_map,bcplm) S3method(p_map,blavaan) S3method(p_map,blrm) S3method(p_map,brmsfit) S3method(p_map,data.frame) S3method(p_map,emmGrid) S3method(p_map,emm_list) S3method(p_map,mcmc) S3method(p_map,mcmc.list) S3method(p_map,numeric) S3method(p_map,sim) S3method(p_map,sim.merMod) S3method(p_map,stanfit) S3method(p_map,stanreg) S3method(p_rope,BFBayesFactor) S3method(p_rope,BGGM) S3method(p_rope,MCMCglmm) S3method(p_rope,bamlss) S3method(p_rope,bcplm) 
S3method(p_rope,blavaan) S3method(p_rope,blrm) S3method(p_rope,brmsfit) S3method(p_rope,data.frame) S3method(p_rope,default) S3method(p_rope,emmGrid) S3method(p_rope,emm_list) S3method(p_rope,mcmc) S3method(p_rope,mcmc.list) S3method(p_rope,numeric) S3method(p_rope,sim) S3method(p_rope,sim.merMod) S3method(p_rope,stanfit) S3method(p_rope,stanreg) S3method(p_significance,BFBayesFactor) S3method(p_significance,BGGM) S3method(p_significance,MCMCglmm) S3method(p_significance,bamlss) S3method(p_significance,bayesQR) S3method(p_significance,bcplm) S3method(p_significance,blavaan) S3method(p_significance,blrm) S3method(p_significance,brmsfit) S3method(p_significance,data.frame) S3method(p_significance,emmGrid) S3method(p_significance,emm_list) S3method(p_significance,mcmc) S3method(p_significance,mcmc.list) S3method(p_significance,numeric) S3method(p_significance,parameters_simulate_model) S3method(p_significance,stanfit) S3method(p_significance,stanreg) S3method(plot,bayesfactor_models) S3method(plot,bayesfactor_parameters) S3method(plot,bayestestR_eti) S3method(plot,bayestestR_hdi) S3method(plot,bayestestR_mediation) S3method(plot,bayestestR_si) S3method(plot,describe_posterior) S3method(plot,equivalence_test) S3method(plot,estimate_density) S3method(plot,estimate_density_df) S3method(plot,map_estimate) S3method(plot,overlap) S3method(plot,p_direction) S3method(plot,p_significance) S3method(plot,point_estimate) S3method(plot,rope) S3method(point_estimate,BFBayesFactor) S3method(point_estimate,BGGM) S3method(point_estimate,MCMCglmm) S3method(point_estimate,bamlss) S3method(point_estimate,bayesQR) S3method(point_estimate,bcplm) S3method(point_estimate,blavaan) S3method(point_estimate,blrm) S3method(point_estimate,brmsfit) S3method(point_estimate,data.frame) S3method(point_estimate,emmGrid) S3method(point_estimate,emm_list) S3method(point_estimate,get_predicted) S3method(point_estimate,matrix) S3method(point_estimate,mcmc) S3method(point_estimate,mcmc.list) S3method(point_estimate,numeric) S3method(point_estimate,sim) S3method(point_estimate,sim.merMod) S3method(point_estimate,stanfit) S3method(point_estimate,stanreg) S3method(print,bayesfactor_inclusion) S3method(print,bayesfactor_models) S3method(print,bayesfactor_models_matrix) S3method(print,bayesfactor_parameters) S3method(print,bayesfactor_restricted) S3method(print,bayestestR_eti) S3method(print,bayestestR_hdi) S3method(print,bayestestR_mediation) S3method(print,bayestestR_si) S3method(print,describe_posterior) S3method(print,equivalence_test) S3method(print,map_estimate) S3method(print,overlap) S3method(print,p_direction) S3method(print,p_map) S3method(print,p_rope) S3method(print,p_significance) S3method(print,point_estimate) S3method(print,rope) S3method(print,sexit) S3method(print_html,bayesfactor_inclusion) S3method(print_html,bayesfactor_models) S3method(print_html,bayesfactor_parameters) S3method(print_html,bayesfactor_restricted) S3method(print_html,bayestestR_eti) S3method(print_html,bayestestR_hdi) S3method(print_html,bayestestR_si) S3method(print_html,describe_posterior) S3method(print_html,map_estimate) S3method(print_html,p_direction) S3method(print_html,p_map) S3method(print_html,p_rope) S3method(print_html,p_significance) S3method(print_html,point_estimate) S3method(print_md,bayesfactor_inclusion) S3method(print_md,bayesfactor_models) S3method(print_md,bayesfactor_parameters) S3method(print_md,bayesfactor_restricted) S3method(print_md,bayestestR_eti) S3method(print_md,bayestestR_hdi) S3method(print_md,bayestestR_si) 
S3method(print_md,describe_posterior) S3method(print_md,map_estimate) S3method(print_md,p_direction) S3method(print_md,p_map) S3method(print_md,p_rope) S3method(print_md,p_significance) S3method(print_md,point_estimate) S3method(rope,BFBayesFactor) S3method(rope,BGGM) S3method(rope,MCMCglmm) S3method(rope,bamlss) S3method(rope,bayesQR) S3method(rope,bcplm) S3method(rope,blavaan) S3method(rope,blrm) S3method(rope,brmsfit) S3method(rope,data.frame) S3method(rope,default) S3method(rope,emmGrid) S3method(rope,emm_list) S3method(rope,mcmc) S3method(rope,mcmc.list) S3method(rope,numeric) S3method(rope,sim) S3method(rope,sim.merMod) S3method(rope,stanfit) S3method(rope,stanreg) S3method(rope_range,default) S3method(rope_range,mlm) S3method(sensitivity_to_prior,stanreg) S3method(sexit_thresholds,BFBayesFactor) S3method(sexit_thresholds,MixMod) S3method(sexit_thresholds,bayesQR) S3method(sexit_thresholds,brmsfit) S3method(sexit_thresholds,default) S3method(sexit_thresholds,feis) S3method(sexit_thresholds,felm) S3method(sexit_thresholds,fixest) S3method(sexit_thresholds,gee) S3method(sexit_thresholds,geeglm) S3method(sexit_thresholds,glm) S3method(sexit_thresholds,glmmTMB) S3method(sexit_thresholds,gls) S3method(sexit_thresholds,hurdle) S3method(sexit_thresholds,lm) S3method(sexit_thresholds,lme) S3method(sexit_thresholds,merMod) S3method(sexit_thresholds,mixed) S3method(sexit_thresholds,mlm) S3method(sexit_thresholds,stanreg) S3method(sexit_thresholds,wbm) S3method(sexit_thresholds,zeroinfl) S3method(si,blavaan) S3method(si,brmsfit) S3method(si,data.frame) S3method(si,emmGrid) S3method(si,emm_list) S3method(si,get_predicted) S3method(si,numeric) S3method(si,stanfit) S3method(si,stanreg) S3method(simulate_prior,bcplm) S3method(simulate_prior,blavaan) S3method(simulate_prior,brmsfit) S3method(simulate_prior,stanreg) S3method(unupdate,blavaan) S3method(unupdate,brmsfit) S3method(unupdate,brmsfit_multiple) S3method(unupdate,stanreg) S3method(update,bayesfactor_models) S3method(weighted_posteriors,BFBayesFactor) S3method(weighted_posteriors,blavaan) S3method(weighted_posteriors,brmsfit) S3method(weighted_posteriors,data.frame) S3method(weighted_posteriors,stanreg) export(area_under_curve) export(auc) export(bayesfactor) export(bayesfactor_inclusion) export(bayesfactor_models) export(bayesfactor_parameters) export(bayesfactor_pointnull) export(bayesfactor_restricted) export(bayesfactor_rope) export(bayesian_as_frequentist) export(bcai) export(bci) export(bf_inclusion) export(bf_models) export(bf_parameters) export(bf_pointnull) export(bf_restricted) export(bf_rope) export(bic_to_bf) export(check_prior) export(ci) export(contr.bayes) export(contr.orthonorm) export(convert_bayesian_as_frequentist) export(convert_p_to_pd) export(convert_pd_to_p) export(cwi) export(density_at) export(describe_posterior) export(describe_prior) export(diagnostic_draws) export(diagnostic_posterior) export(distribution) export(distribution_beta) export(distribution_binom) export(distribution_binomial) export(distribution_cauchy) export(distribution_chisq) export(distribution_chisquared) export(distribution_custom) export(distribution_gamma) export(distribution_gaussian) export(distribution_mixture_normal) export(distribution_nbinom) export(distribution_normal) export(distribution_poisson) export(distribution_student) export(distribution_student_t) export(distribution_t) export(distribution_tweedie) export(distribution_uniform) export(effective_sample) export(equivalence_test) export(estimate_density) export(eti) export(hdi) 
export(map_estimate) export(mcse) export(mediation) export(model_to_priors) export(overlap) export(p_direction) export(p_map) export(p_pointnull) export(p_rope) export(p_significance) export(p_to_pd) export(pd) export(pd_to_p) export(point_estimate) export(print_html) export(print_md) export(reshape_ci) export(reshape_draws) export(reshape_iterations) export(rnorm_perfect) export(rope) export(rope_range) export(sensitivity_to_prior) export(sexit) export(sexit_thresholds) export(si) export(simulate_correlation) export(simulate_difference) export(simulate_prior) export(simulate_simpson) export(simulate_ttest) export(unupdate) export(weighted_posteriors) importFrom(datawizard,reshape_ci) importFrom(insight,print_html) importFrom(insight,print_md)